blob: 257b29516f69c85ee7ca3e1f272f812af65dd5c6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
Jesper Juhl02c30a82005-05-05 16:16:16 -070010 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75#include <asm/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070076#include <linux/bitops.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080077#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070078#include <linux/cpu.h>
79#include <linux/types.h>
80#include <linux/kernel.h>
stephen hemminger08e98972009-11-10 07:20:34 +000081#include <linux/hash.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090082#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#include <linux/sched.h>
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -080084#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include <linux/string.h>
86#include <linux/mm.h>
87#include <linux/socket.h>
88#include <linux/sockios.h>
89#include <linux/errno.h>
90#include <linux/interrupt.h>
91#include <linux/if_ether.h>
92#include <linux/netdevice.h>
93#include <linux/etherdevice.h>
Ben Hutchings0187bdf2008-06-19 16:15:47 -070094#include <linux/ethtool.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#include <linux/notifier.h>
96#include <linux/skbuff.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020097#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070098#include <net/sock.h>
99#include <linux/rtnetlink.h>
100#include <linux/proc_fs.h>
101#include <linux/seq_file.h>
102#include <linux/stat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103#include <net/dst.h>
104#include <net/pkt_sched.h>
105#include <net/checksum.h>
Arnd Bergmann44540962009-11-26 06:07:08 +0000106#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107#include <linux/highmem.h>
108#include <linux/init.h>
109#include <linux/kmod.h>
110#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111#include <linux/netpoll.h>
112#include <linux/rcupdate.h>
113#include <linux/delay.h>
Johannes Berg295f4a12007-04-26 20:43:56 -0700114#include <net/wext.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <net/iw_handler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116#include <asm/current.h>
Steve Grubb5bdb9882005-12-03 08:39:35 -0500117#include <linux/audit.h>
Chris Leechdb217332006-06-17 21:24:58 -0700118#include <linux/dmaengine.h>
Herbert Xuf6a78bf2006-06-22 02:57:17 -0700119#include <linux/err.h>
David S. Millerc7fa9d12006-08-15 16:34:13 -0700120#include <linux/ctype.h>
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700121#include <linux/if_arp.h>
Ben Hutchings6de329e2008-06-16 17:02:28 -0700122#include <linux/if_vlan.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700123#include <linux/ip.h>
Alexander Duyckad55dca2008-09-20 22:05:50 -0700124#include <net/ip.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700125#include <linux/ipv6.h>
126#include <linux/in.h>
David S. Millerb6b2fed2008-07-21 09:48:06 -0700127#include <linux/jhash.h>
128#include <linux/random.h>
David S. Miller9cbc1cb2009-06-15 03:02:23 -0700129#include <trace/events/napi.h>
Koki Sanagicf66ba52010-08-23 18:45:02 +0900130#include <trace/events/net.h>
Koki Sanagi07dc22e2010-08-23 18:46:12 +0900131#include <trace/events/skb.h>
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +0000132#include <linux/pci.h>
Stephen Rothwellcaeda9b2010-09-16 21:39:16 -0700133#include <linux/inetdevice.h>
Ben Hutchingsc4454772011-01-19 11:03:53 +0000134#include <linux/cpu_rmap.h>
Richard Cochran4dc360c2011-10-19 17:00:35 -0400135#include <linux/net_tstamp.h>
Ingo Molnarc5905af2012-02-24 08:31:31 +0100136#include <linux/static_key.h>
Eric Dumazet4504b862011-11-28 05:23:23 +0000137#include <net/flow_keys.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138
Pavel Emelyanov342709e2007-10-23 21:14:45 -0700139#include "net-sysfs.h"
140
Herbert Xud565b0a2008-12-15 23:38:52 -0800141/* Instead of increasing this, you should create a hash table. */
142#define MAX_GRO_SKBS 8
143
Herbert Xu5d38a072009-01-04 16:13:40 -0800144/* This should be increased if a protocol with a bigger head is added. */
145#define GRO_MAX_HEAD (MAX_HEADER + 128)
146
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147/*
148 * The list of packet types we will receive (as opposed to discard)
149 * and the routines to invoke.
150 *
151 * Why 16. Because with 16 the only overlap we get on a hash of the
152 * low nibble of the protocol value is RARP/SNAP/X.25.
153 *
154 * NOTE: That is no longer true with the addition of VLAN tags. Not
155 * sure which should go first, but I bet it won't make much
156 * difference if we are running VLANs. The good news is that
157 * this protocol won't be in the list unless compiled in, so
Stephen Hemminger3041a062006-05-26 13:25:24 -0700158 * the average user (w/out VLANs) will not be adversely affected.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 * --BLG
160 *
161 * 0800 IP
162 * 8100 802.1Q VLAN
163 * 0001 802.3
164 * 0002 AX.25
165 * 0004 802.2
166 * 8035 RARP
167 * 0005 SNAP
168 * 0805 X.25
169 * 0806 ARP
170 * 8137 IPX
171 * 0009 Localtalk
172 * 86DD IPv6
173 */
174
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800175#define PTYPE_HASH_SIZE (16)
176#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178static DEFINE_SPINLOCK(ptype_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000179static DEFINE_SPINLOCK(offload_lock);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800180static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -0700181static struct list_head ptype_all __read_mostly; /* Taps */
Vlad Yasevich62532da2012-11-15 08:49:10 +0000182static struct list_head offload_base __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184/*
Pavel Emelianov7562f872007-05-03 15:13:45 -0700185 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 * semaphore.
187 *
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800188 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 *
190 * Writers must hold the rtnl semaphore while they loop through the
Pavel Emelianov7562f872007-05-03 15:13:45 -0700191 * dev_base_head list, and hold dev_base_lock for writing when they do the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 * actual updates. This allows pure readers to access the list even
193 * while a writer is preparing to update it.
194 *
195 * To put it another way, dev_base_lock is held for writing only to
196 * protect against pure readers; the rtnl semaphore provides the
197 * protection against other writers.
198 *
199 * See, for example usages, register_netdevice() and
200 * unregister_netdevice(), which must be called with the rtnl
201 * semaphore held.
202 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203DEFINE_RWLOCK(dev_base_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204EXPORT_SYMBOL(dev_base_lock);
205
Eric Dumazet30e6c9f2012-12-20 17:25:08 +0000206seqcount_t devnet_rename_seq;
Brian Haleyc91f6df2012-11-26 05:21:08 +0000207
Thomas Graf4e985ad2011-06-21 03:11:20 +0000208static inline void dev_base_seq_inc(struct net *net)
209{
210 while (++net->dev_base_seq == 0);
211}
212
Eric W. Biederman881d9662007-09-17 11:56:21 -0700213static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214{
Eric Dumazet95c96172012-04-15 05:58:06 +0000215 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
216
stephen hemminger08e98972009-11-10 07:20:34 +0000217 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218}
219
Eric W. Biederman881d9662007-09-17 11:56:21 -0700220static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221{
Eric Dumazet7c28bd02009-10-24 06:13:17 -0700222 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223}
224
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000225static inline void rps_lock(struct softnet_data *sd)
Changli Gao152102c2010-03-30 20:16:22 +0000226{
227#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000228 spin_lock(&sd->input_pkt_queue.lock);
Changli Gao152102c2010-03-30 20:16:22 +0000229#endif
230}
231
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000232static inline void rps_unlock(struct softnet_data *sd)
Changli Gao152102c2010-03-30 20:16:22 +0000233{
234#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000235 spin_unlock(&sd->input_pkt_queue.lock);
Changli Gao152102c2010-03-30 20:16:22 +0000236#endif
237}
238
Eric W. Biedermance286d32007-09-12 13:53:49 +0200239/* Device list insertion */
240static int list_netdevice(struct net_device *dev)
241{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900242 struct net *net = dev_net(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200243
244 ASSERT_RTNL();
245
246 write_lock_bh(&dev_base_lock);
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800247 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
Eric Dumazet72c95282009-10-30 07:11:27 +0000248 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000249 hlist_add_head_rcu(&dev->index_hlist,
250 dev_index_hash(net, dev->ifindex));
Eric W. Biedermance286d32007-09-12 13:53:49 +0200251 write_unlock_bh(&dev_base_lock);
Thomas Graf4e985ad2011-06-21 03:11:20 +0000252
253 dev_base_seq_inc(net);
254
Eric W. Biedermance286d32007-09-12 13:53:49 +0200255 return 0;
256}
257
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000258/* Device list removal
259 * caller must respect a RCU grace period before freeing/reusing dev
260 */
Eric W. Biedermance286d32007-09-12 13:53:49 +0200261static void unlist_netdevice(struct net_device *dev)
262{
263 ASSERT_RTNL();
264
265 /* Unlink dev from the device chain */
266 write_lock_bh(&dev_base_lock);
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800267 list_del_rcu(&dev->dev_list);
Eric Dumazet72c95282009-10-30 07:11:27 +0000268 hlist_del_rcu(&dev->name_hlist);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000269 hlist_del_rcu(&dev->index_hlist);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200270 write_unlock_bh(&dev_base_lock);
Thomas Graf4e985ad2011-06-21 03:11:20 +0000271
272 dev_base_seq_inc(dev_net(dev));
Eric W. Biedermance286d32007-09-12 13:53:49 +0200273}
274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275/*
276 * Our notifier list
277 */
278
Alan Sternf07d5b92006-05-09 15:23:03 -0700279static RAW_NOTIFIER_HEAD(netdev_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280
281/*
282 * Device drivers call our routines to queue packets here. We empty the
283 * queue in the local softnet handler.
284 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700285
Eric Dumazet9958da02010-04-17 04:17:02 +0000286DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700287EXPORT_PER_CPU_SYMBOL(softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288
David S. Millercf508b12008-07-22 14:16:42 -0700289#ifdef CONFIG_LOCKDEP
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700290/*
David S. Millerc773e842008-07-08 23:13:53 -0700291 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700292 * according to dev->type
293 */
294static const unsigned short netdev_lock_type[] =
295 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
296 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
297 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
298 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
299 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
300 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
301 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
302 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
303 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
304 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
305 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
306 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
Paul Gortmaker211ed862012-05-10 17:14:35 -0400307 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
308 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
309 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700310
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700311static const char *const netdev_lock_name[] =
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700312 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
313 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
314 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
315 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
316 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
317 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
318 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
319 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
320 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
321 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
322 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
323 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
Paul Gortmaker211ed862012-05-10 17:14:35 -0400324 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
325 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
326 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700327
328static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
David S. Millercf508b12008-07-22 14:16:42 -0700329static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700330
331static inline unsigned short netdev_lock_pos(unsigned short dev_type)
332{
333 int i;
334
335 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
336 if (netdev_lock_type[i] == dev_type)
337 return i;
338 /* the last key is used by default */
339 return ARRAY_SIZE(netdev_lock_type) - 1;
340}
341
David S. Millercf508b12008-07-22 14:16:42 -0700342static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
343 unsigned short dev_type)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700344{
345 int i;
346
347 i = netdev_lock_pos(dev_type);
348 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
349 netdev_lock_name[i]);
350}
David S. Millercf508b12008-07-22 14:16:42 -0700351
352static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
353{
354 int i;
355
356 i = netdev_lock_pos(dev->type);
357 lockdep_set_class_and_name(&dev->addr_list_lock,
358 &netdev_addr_lock_key[i],
359 netdev_lock_name[i]);
360}
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700361#else
David S. Millercf508b12008-07-22 14:16:42 -0700362static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
363 unsigned short dev_type)
364{
365}
366static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700367{
368}
369#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370
371/*******************************************************************************
372
373 Protocol management and registration routines
374
375*******************************************************************************/
376
377/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 * Add a protocol ID to the list. Now that the input handler is
379 * smarter we can dispense with all the messy stuff that used to be
380 * here.
381 *
382 * BEWARE!!! Protocol handlers, mangling input packets,
383 * MUST BE last in hash buckets and checking protocol handlers
384 * MUST start from promiscuous ptype_all chain in net_bh.
385 * It is true now, do not change it.
386 * Explanation follows: if protocol handler, mangling packet, will
387 * be the first on list, it is not able to sense, that packet
388 * is cloned and should be copied-on-write, so that it will
389 * change it and subsequent readers will get broken packet.
390 * --ANK (980803)
391 */
392
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000393static inline struct list_head *ptype_head(const struct packet_type *pt)
394{
395 if (pt->type == htons(ETH_P_ALL))
396 return &ptype_all;
397 else
398 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
399}
400
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401/**
402 * dev_add_pack - add packet handler
403 * @pt: packet type declaration
404 *
405 * Add a protocol handler to the networking stack. The passed &packet_type
406 * is linked into kernel lists and may not be freed until it has been
407 * removed from the kernel lists.
408 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900409 * This call does not sleep therefore it can not
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 * guarantee all CPU's that are in middle of receiving packets
411 * will see the new packet type (until the next received packet).
412 */
413
414void dev_add_pack(struct packet_type *pt)
415{
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000416 struct list_head *head = ptype_head(pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000418 spin_lock(&ptype_lock);
419 list_add_rcu(&pt->list, head);
420 spin_unlock(&ptype_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700422EXPORT_SYMBOL(dev_add_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424/**
425 * __dev_remove_pack - remove packet handler
426 * @pt: packet type declaration
427 *
428 * Remove a protocol handler that was previously added to the kernel
429 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
430 * from the kernel lists and can be freed or reused once this function
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900431 * returns.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 *
433 * The packet type might still be in use by receivers
434 * and must not be freed until after all the CPU's have gone
435 * through a quiescent state.
436 */
437void __dev_remove_pack(struct packet_type *pt)
438{
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000439 struct list_head *head = ptype_head(pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700440 struct packet_type *pt1;
441
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000442 spin_lock(&ptype_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443
444 list_for_each_entry(pt1, head, list) {
445 if (pt == pt1) {
446 list_del_rcu(&pt->list);
447 goto out;
448 }
449 }
450
Joe Perches7b6cd1c2012-02-01 10:54:43 +0000451 pr_warn("dev_remove_pack: %p not found\n", pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452out:
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000453 spin_unlock(&ptype_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700455EXPORT_SYMBOL(__dev_remove_pack);
456
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457/**
458 * dev_remove_pack - remove packet handler
459 * @pt: packet type declaration
460 *
461 * Remove a protocol handler that was previously added to the kernel
462 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
463 * from the kernel lists and can be freed or reused once this function
464 * returns.
465 *
466 * This call sleeps to guarantee that no CPU is looking at the packet
467 * type after return.
468 */
469void dev_remove_pack(struct packet_type *pt)
470{
471 __dev_remove_pack(pt);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900472
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 synchronize_net();
474}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700475EXPORT_SYMBOL(dev_remove_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476
Vlad Yasevich62532da2012-11-15 08:49:10 +0000477
478/**
479 * dev_add_offload - register offload handlers
480 * @po: protocol offload declaration
481 *
482 * Add protocol offload handlers to the networking stack. The passed
483 * &proto_offload is linked into kernel lists and may not be freed until
484 * it has been removed from the kernel lists.
485 *
486 * This call does not sleep therefore it can not
487 * guarantee all CPU's that are in middle of receiving packets
488 * will see the new offload handlers (until the next received packet).
489 */
490void dev_add_offload(struct packet_offload *po)
491{
492 struct list_head *head = &offload_base;
493
494 spin_lock(&offload_lock);
495 list_add_rcu(&po->list, head);
496 spin_unlock(&offload_lock);
497}
498EXPORT_SYMBOL(dev_add_offload);
499
500/**
501 * __dev_remove_offload - remove offload handler
502 * @po: packet offload declaration
503 *
504 * Remove a protocol offload handler that was previously added to the
505 * kernel offload handlers by dev_add_offload(). The passed &offload_type
506 * is removed from the kernel lists and can be freed or reused once this
507 * function returns.
508 *
509 * The packet type might still be in use by receivers
510 * and must not be freed until after all the CPU's have gone
511 * through a quiescent state.
512 */
513void __dev_remove_offload(struct packet_offload *po)
514{
515 struct list_head *head = &offload_base;
516 struct packet_offload *po1;
517
Eric Dumazetc53aa502012-11-16 08:08:23 +0000518 spin_lock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000519
520 list_for_each_entry(po1, head, list) {
521 if (po == po1) {
522 list_del_rcu(&po->list);
523 goto out;
524 }
525 }
526
527 pr_warn("dev_remove_offload: %p not found\n", po);
528out:
Eric Dumazetc53aa502012-11-16 08:08:23 +0000529 spin_unlock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000530}
531EXPORT_SYMBOL(__dev_remove_offload);
532
533/**
534 * dev_remove_offload - remove packet offload handler
535 * @po: packet offload declaration
536 *
537 * Remove a packet offload handler that was previously added to the kernel
538 * offload handlers by dev_add_offload(). The passed &offload_type is
539 * removed from the kernel lists and can be freed or reused once this
540 * function returns.
541 *
542 * This call sleeps to guarantee that no CPU is looking at the packet
543 * type after return.
544 */
545void dev_remove_offload(struct packet_offload *po)
546{
547 __dev_remove_offload(po);
548
549 synchronize_net();
550}
551EXPORT_SYMBOL(dev_remove_offload);
552
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553/******************************************************************************
554
555 Device Boot-time Settings Routines
556
557*******************************************************************************/
558
559/* Boot time configuration table */
560static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
561
562/**
563 * netdev_boot_setup_add - add new setup entry
564 * @name: name of the device
565 * @map: configured settings for the device
566 *
567 * Adds new setup entry to the dev_boot_setup list. The function
568 * returns 0 on error and 1 on success. This is a generic routine to
569 * all netdevices.
570 */
571static int netdev_boot_setup_add(char *name, struct ifmap *map)
572{
573 struct netdev_boot_setup *s;
574 int i;
575
576 s = dev_boot_setup;
577 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
578 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
579 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700580 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 memcpy(&s[i].map, map, sizeof(s[i].map));
582 break;
583 }
584 }
585
586 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
587}
588
589/**
590 * netdev_boot_setup_check - check boot time settings
591 * @dev: the netdevice
592 *
593 * Check boot time settings for the device.
594 * The found settings are set for the device to be used
595 * later in the device probing.
596 * Returns 0 if no settings found, 1 if they are.
597 */
598int netdev_boot_setup_check(struct net_device *dev)
599{
600 struct netdev_boot_setup *s = dev_boot_setup;
601 int i;
602
603 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
604 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700605 !strcmp(dev->name, s[i].name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 dev->irq = s[i].map.irq;
607 dev->base_addr = s[i].map.base_addr;
608 dev->mem_start = s[i].map.mem_start;
609 dev->mem_end = s[i].map.mem_end;
610 return 1;
611 }
612 }
613 return 0;
614}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700615EXPORT_SYMBOL(netdev_boot_setup_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616
617
618/**
619 * netdev_boot_base - get address from boot time settings
620 * @prefix: prefix for network device
621 * @unit: id for network device
622 *
623 * Check boot time settings for the base address of device.
624 * The found settings are set for the device to be used
625 * later in the device probing.
626 * Returns 0 if no settings found.
627 */
628unsigned long netdev_boot_base(const char *prefix, int unit)
629{
630 const struct netdev_boot_setup *s = dev_boot_setup;
631 char name[IFNAMSIZ];
632 int i;
633
634 sprintf(name, "%s%d", prefix, unit);
635
636 /*
637 * If device already registered then return base of 1
638 * to indicate not to probe for this interface
639 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700640 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 return 1;
642
643 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
644 if (!strcmp(name, s[i].name))
645 return s[i].map.base_addr;
646 return 0;
647}
648
649/*
650 * Saves at boot time configured settings for any netdevice.
651 */
652int __init netdev_boot_setup(char *str)
653{
654 int ints[5];
655 struct ifmap map;
656
657 str = get_options(str, ARRAY_SIZE(ints), ints);
658 if (!str || !*str)
659 return 0;
660
661 /* Save settings */
662 memset(&map, 0, sizeof(map));
663 if (ints[0] > 0)
664 map.irq = ints[1];
665 if (ints[0] > 1)
666 map.base_addr = ints[2];
667 if (ints[0] > 2)
668 map.mem_start = ints[3];
669 if (ints[0] > 3)
670 map.mem_end = ints[4];
671
672 /* Add new entry to the list */
673 return netdev_boot_setup_add(str, &map);
674}
675
676__setup("netdev=", netdev_boot_setup);
677
678/*******************************************************************************
679
680 Device Interface Subroutines
681
682*******************************************************************************/
683
684/**
685 * __dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700686 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 * @name: name to find
688 *
689 * Find an interface by name. Must be called under RTNL semaphore
690 * or @dev_base_lock. If the name is found a pointer to the device
691 * is returned. If the name is not found then %NULL is returned. The
692 * reference counters are not incremented so the caller must be
693 * careful with locks.
694 */
695
Eric W. Biederman881d9662007-09-17 11:56:21 -0700696struct net_device *__dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697{
698 struct hlist_node *p;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700699 struct net_device *dev;
700 struct hlist_head *head = dev_name_hash(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700702 hlist_for_each_entry(dev, p, head, name_hlist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 if (!strncmp(dev->name, name, IFNAMSIZ))
704 return dev;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700705
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 return NULL;
707}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700708EXPORT_SYMBOL(__dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709
710/**
Eric Dumazet72c95282009-10-30 07:11:27 +0000711 * dev_get_by_name_rcu - find a device by its name
712 * @net: the applicable net namespace
713 * @name: name to find
714 *
715 * Find an interface by name.
716 * If the name is found a pointer to the device is returned.
717 * If the name is not found then %NULL is returned.
718 * The reference counters are not incremented so the caller must be
719 * careful with locks. The caller must hold RCU lock.
720 */
721
722struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
723{
724 struct hlist_node *p;
725 struct net_device *dev;
726 struct hlist_head *head = dev_name_hash(net, name);
727
728 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
729 if (!strncmp(dev->name, name, IFNAMSIZ))
730 return dev;
731
732 return NULL;
733}
734EXPORT_SYMBOL(dev_get_by_name_rcu);
735
736/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 * dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700738 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 * @name: name to find
740 *
741 * Find an interface by name. This can be called from any
742 * context and does its own locking. The returned handle has
743 * the usage count incremented and the caller must use dev_put() to
744 * release it when it is no longer needed. %NULL is returned if no
745 * matching device is found.
746 */
747
Eric W. Biederman881d9662007-09-17 11:56:21 -0700748struct net_device *dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749{
750 struct net_device *dev;
751
Eric Dumazet72c95282009-10-30 07:11:27 +0000752 rcu_read_lock();
753 dev = dev_get_by_name_rcu(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 if (dev)
755 dev_hold(dev);
Eric Dumazet72c95282009-10-30 07:11:27 +0000756 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 return dev;
758}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700759EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760
761/**
762 * __dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700763 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 * @ifindex: index of device
765 *
766 * Search for an interface by index. Returns %NULL if the device
767 * is not found or a pointer to the device. The device has not
768 * had its reference counter increased so the caller must be careful
769 * about locking. The caller must hold either the RTNL semaphore
770 * or @dev_base_lock.
771 */
772
Eric W. Biederman881d9662007-09-17 11:56:21 -0700773struct net_device *__dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774{
775 struct hlist_node *p;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700776 struct net_device *dev;
777 struct hlist_head *head = dev_index_hash(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700779 hlist_for_each_entry(dev, p, head, index_hlist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780 if (dev->ifindex == ifindex)
781 return dev;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700782
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 return NULL;
784}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700785EXPORT_SYMBOL(__dev_get_by_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000787/**
788 * dev_get_by_index_rcu - find a device by its ifindex
789 * @net: the applicable net namespace
790 * @ifindex: index of device
791 *
792 * Search for an interface by index. Returns %NULL if the device
793 * is not found or a pointer to the device. The device has not
794 * had its reference counter increased so the caller must be careful
795 * about locking. The caller must hold RCU lock.
796 */
797
798struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
799{
800 struct hlist_node *p;
801 struct net_device *dev;
802 struct hlist_head *head = dev_index_hash(net, ifindex);
803
804 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
805 if (dev->ifindex == ifindex)
806 return dev;
807
808 return NULL;
809}
810EXPORT_SYMBOL(dev_get_by_index_rcu);
811
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812
813/**
814 * dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700815 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 * @ifindex: index of device
817 *
818 * Search for an interface by index. Returns NULL if the device
819 * is not found or a pointer to the device. The device returned has
820 * had a reference added and the pointer is safe until the user calls
821 * dev_put to indicate they have finished with it.
822 */
823
Eric W. Biederman881d9662007-09-17 11:56:21 -0700824struct net_device *dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825{
826 struct net_device *dev;
827
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000828 rcu_read_lock();
829 dev = dev_get_by_index_rcu(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 if (dev)
831 dev_hold(dev);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000832 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 return dev;
834}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700835EXPORT_SYMBOL(dev_get_by_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836
837/**
Eric Dumazet941666c2010-12-05 01:23:53 +0000838 * dev_getbyhwaddr_rcu - find a device by its hardware address
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700839 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 * @type: media type of device
841 * @ha: hardware address
842 *
843 * Search for an interface by MAC address. Returns NULL if the device
Eric Dumazetc5066532011-01-24 13:16:16 -0800844 * is not found or a pointer to the device.
845 * The caller must hold RCU or RTNL.
Eric Dumazet941666c2010-12-05 01:23:53 +0000846 * The returned device has not had its ref count increased
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 * and the caller must therefore be careful about locking
848 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 */
850
Eric Dumazet941666c2010-12-05 01:23:53 +0000851struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
852 const char *ha)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853{
854 struct net_device *dev;
855
Eric Dumazet941666c2010-12-05 01:23:53 +0000856 for_each_netdev_rcu(net, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 if (dev->type == type &&
858 !memcmp(dev->dev_addr, ha, dev->addr_len))
Pavel Emelianov7562f872007-05-03 15:13:45 -0700859 return dev;
860
861 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862}
Eric Dumazet941666c2010-12-05 01:23:53 +0000863EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
Jochen Friedrichcf309e32005-09-22 04:44:55 -0300864
Eric W. Biederman881d9662007-09-17 11:56:21 -0700865struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700866{
867 struct net_device *dev;
868
869 ASSERT_RTNL();
Eric W. Biederman881d9662007-09-17 11:56:21 -0700870 for_each_netdev(net, dev)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700871 if (dev->type == type)
Pavel Emelianov7562f872007-05-03 15:13:45 -0700872 return dev;
873
874 return NULL;
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700875}
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700876EXPORT_SYMBOL(__dev_getfirstbyhwtype);
877
Eric W. Biederman881d9662007-09-17 11:56:21 -0700878struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879{
Eric Dumazet99fe3c32010-03-18 11:27:25 +0000880 struct net_device *dev, *ret = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881
Eric Dumazet99fe3c32010-03-18 11:27:25 +0000882 rcu_read_lock();
883 for_each_netdev_rcu(net, dev)
884 if (dev->type == type) {
885 dev_hold(dev);
886 ret = dev;
887 break;
888 }
889 rcu_read_unlock();
890 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892EXPORT_SYMBOL(dev_getfirstbyhwtype);
893
894/**
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000895 * dev_get_by_flags_rcu - find any device with given flags
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700896 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 * @if_flags: IFF_* values
898 * @mask: bitmask of bits in if_flags to check
899 *
900 * Search for any interface with the given flags. Returns NULL if a device
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000901 * is not found or a pointer to the device. Must be called inside
902 * rcu_read_lock(), and result refcount is unchanged.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 */
904
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000905struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700906 unsigned short mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907{
Pavel Emelianov7562f872007-05-03 15:13:45 -0700908 struct net_device *dev, *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909
Pavel Emelianov7562f872007-05-03 15:13:45 -0700910 ret = NULL;
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800911 for_each_netdev_rcu(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 if (((dev->flags ^ if_flags) & mask) == 0) {
Pavel Emelianov7562f872007-05-03 15:13:45 -0700913 ret = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 break;
915 }
916 }
Pavel Emelianov7562f872007-05-03 15:13:45 -0700917 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918}
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000919EXPORT_SYMBOL(dev_get_by_flags_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920
921/**
922 * dev_valid_name - check if name is okay for network device
923 * @name: name string
924 *
925 * Network device names need to be valid file names to
David S. Millerc7fa9d12006-08-15 16:34:13 -0700926 * to allow sysfs to work. We also disallow any kind of
927 * whitespace.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928 */
David S. Miller95f050b2012-03-06 16:12:15 -0500929bool dev_valid_name(const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930{
David S. Millerc7fa9d12006-08-15 16:34:13 -0700931 if (*name == '\0')
David S. Miller95f050b2012-03-06 16:12:15 -0500932 return false;
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -0700933 if (strlen(name) >= IFNAMSIZ)
David S. Miller95f050b2012-03-06 16:12:15 -0500934 return false;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700935 if (!strcmp(name, ".") || !strcmp(name, ".."))
David S. Miller95f050b2012-03-06 16:12:15 -0500936 return false;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700937
938 while (*name) {
939 if (*name == '/' || isspace(*name))
David S. Miller95f050b2012-03-06 16:12:15 -0500940 return false;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700941 name++;
942 }
David S. Miller95f050b2012-03-06 16:12:15 -0500943 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700945EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946
947/**
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200948 * __dev_alloc_name - allocate a name for a device
949 * @net: network namespace to allocate the device name in
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 * @name: name format string
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200951 * @buf: scratch buffer and result name string
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 *
953 * Passed a format string - eg "lt%d" it will try and find a suitable
Stephen Hemminger3041a062006-05-26 13:25:24 -0700954 * id. It scans list of devices to build up a free map, then chooses
955 * the first empty slot. The caller must hold the dev_base or rtnl lock
956 * while allocating the name and adding the device in order to avoid
957 * duplicates.
958 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
959 * Returns the number of the unit assigned or a negative errno code.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 */
961
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200962static int __dev_alloc_name(struct net *net, const char *name, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963{
964 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 const char *p;
966 const int max_netdevices = 8*PAGE_SIZE;
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700967 unsigned long *inuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968 struct net_device *d;
969
970 p = strnchr(name, IFNAMSIZ-1, '%');
971 if (p) {
972 /*
973 * Verify the string as this thing may have come from
974 * the user. There must be either one "%d" and no other "%"
975 * characters.
976 */
977 if (p[1] != 'd' || strchr(p + 2, '%'))
978 return -EINVAL;
979
980 /* Use one page as a bit array of possible slots */
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700981 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 if (!inuse)
983 return -ENOMEM;
984
Eric W. Biederman881d9662007-09-17 11:56:21 -0700985 for_each_netdev(net, d) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 if (!sscanf(d->name, name, &i))
987 continue;
988 if (i < 0 || i >= max_netdevices)
989 continue;
990
991 /* avoid cases where sscanf is not exact inverse of printf */
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200992 snprintf(buf, IFNAMSIZ, name, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 if (!strncmp(buf, d->name, IFNAMSIZ))
994 set_bit(i, inuse);
995 }
996
997 i = find_first_zero_bit(inuse, max_netdevices);
998 free_page((unsigned long) inuse);
999 }
1000
Octavian Purdilad9031022009-11-18 02:36:59 +00001001 if (buf != name)
1002 snprintf(buf, IFNAMSIZ, name, i);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001003 if (!__dev_get_by_name(net, buf))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
1006 /* It is possible to run out of possible slots
1007 * when the name is long and there isn't enough space left
1008 * for the digits, or if all bits are used.
1009 */
1010 return -ENFILE;
1011}
1012
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001013/**
1014 * dev_alloc_name - allocate a name for a device
1015 * @dev: device
1016 * @name: name format string
1017 *
1018 * Passed a format string - eg "lt%d" it will try and find a suitable
1019 * id. It scans list of devices to build up a free map, then chooses
1020 * the first empty slot. The caller must hold the dev_base or rtnl lock
1021 * while allocating the name and adding the device in order to avoid
1022 * duplicates.
1023 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1024 * Returns the number of the unit assigned or a negative errno code.
1025 */
1026
1027int dev_alloc_name(struct net_device *dev, const char *name)
1028{
1029 char buf[IFNAMSIZ];
1030 struct net *net;
1031 int ret;
1032
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001033 BUG_ON(!dev_net(dev));
1034 net = dev_net(dev);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001035 ret = __dev_alloc_name(net, name, buf);
1036 if (ret >= 0)
1037 strlcpy(dev->name, buf, IFNAMSIZ);
1038 return ret;
1039}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001040EXPORT_SYMBOL(dev_alloc_name);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001041
Gao feng828de4f2012-09-13 20:58:27 +00001042static int dev_alloc_name_ns(struct net *net,
1043 struct net_device *dev,
1044 const char *name)
Octavian Purdilad9031022009-11-18 02:36:59 +00001045{
Gao feng828de4f2012-09-13 20:58:27 +00001046 char buf[IFNAMSIZ];
1047 int ret;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001048
Gao feng828de4f2012-09-13 20:58:27 +00001049 ret = __dev_alloc_name(net, name, buf);
1050 if (ret >= 0)
1051 strlcpy(dev->name, buf, IFNAMSIZ);
1052 return ret;
1053}
1054
1055static int dev_get_valid_name(struct net *net,
1056 struct net_device *dev,
1057 const char *name)
1058{
1059 BUG_ON(!net);
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001060
Octavian Purdilad9031022009-11-18 02:36:59 +00001061 if (!dev_valid_name(name))
1062 return -EINVAL;
1063
Jiri Pirko1c5cae82011-04-30 01:21:32 +00001064 if (strchr(name, '%'))
Gao feng828de4f2012-09-13 20:58:27 +00001065 return dev_alloc_name_ns(net, dev, name);
Octavian Purdilad9031022009-11-18 02:36:59 +00001066 else if (__dev_get_by_name(net, name))
1067 return -EEXIST;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001068 else if (dev->name != name)
1069 strlcpy(dev->name, name, IFNAMSIZ);
Octavian Purdilad9031022009-11-18 02:36:59 +00001070
1071 return 0;
1072}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073
1074/**
1075 * dev_change_name - change name of a device
1076 * @dev: device
1077 * @newname: name (or format string) must be at least IFNAMSIZ
1078 *
1079 * Change name of a device, can pass format strings "eth%d".
1080 * for wildcarding.
1081 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001082int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083{
Herbert Xufcc5a032007-07-30 17:03:38 -07001084 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -07001086 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001087 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088
1089 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001090 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001092 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 if (dev->flags & IFF_UP)
1094 return -EBUSY;
1095
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001096 write_seqcount_begin(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001097
1098 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001099 write_seqcount_end(&devnet_rename_seq);
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001100 return 0;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001101 }
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001102
Herbert Xufcc5a032007-07-30 17:03:38 -07001103 memcpy(oldname, dev->name, IFNAMSIZ);
1104
Gao feng828de4f2012-09-13 20:58:27 +00001105 err = dev_get_valid_name(net, dev, newname);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001106 if (err < 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001107 write_seqcount_end(&devnet_rename_seq);
Octavian Purdilad9031022009-11-18 02:36:59 +00001108 return err;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
Herbert Xufcc5a032007-07-30 17:03:38 -07001111rollback:
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001112 ret = device_rename(&dev->dev, dev->name);
1113 if (ret) {
1114 memcpy(dev->name, oldname, IFNAMSIZ);
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001115 write_seqcount_end(&devnet_rename_seq);
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001116 return ret;
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001117 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001118
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001119 write_seqcount_end(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001120
Herbert Xu7f988ea2007-07-30 16:35:46 -07001121 write_lock_bh(&dev_base_lock);
Eric Dumazet372b2312011-05-17 13:56:59 -04001122 hlist_del_rcu(&dev->name_hlist);
Eric Dumazet72c95282009-10-30 07:11:27 +00001123 write_unlock_bh(&dev_base_lock);
1124
1125 synchronize_rcu();
1126
1127 write_lock_bh(&dev_base_lock);
1128 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -07001129 write_unlock_bh(&dev_base_lock);
1130
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001131 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001132 ret = notifier_to_errno(ret);
1133
1134 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001135 /* err >= 0 after dev_alloc_name() or stores the first errno */
1136 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001137 err = ret;
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001138 write_seqcount_begin(&devnet_rename_seq);
Herbert Xufcc5a032007-07-30 17:03:38 -07001139 memcpy(dev->name, oldname, IFNAMSIZ);
1140 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001141 } else {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001142 pr_err("%s: name change rollback failed: %d\n",
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001143 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001144 }
1145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 return err;
1148}
1149
1150/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001151 * dev_set_alias - change ifalias of a device
1152 * @dev: device
1153 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001154 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001155 *
1156 * Set ifalias for a device,
1157 */
1158int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1159{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001160 char *new_ifalias;
1161
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001162 ASSERT_RTNL();
1163
1164 if (len >= IFALIASZ)
1165 return -EINVAL;
1166
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001167 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001168 kfree(dev->ifalias);
1169 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001170 return 0;
1171 }
1172
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001173 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1174 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001175 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001176 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001177
1178 strlcpy(dev->ifalias, alias, len+1);
1179 return len;
1180}
1181
1182
1183/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001184 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001185 * @dev: device to cause notification
1186 *
1187 * Called to indicate a device has changed features.
1188 */
1189void netdev_features_change(struct net_device *dev)
1190{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001191 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001192}
1193EXPORT_SYMBOL(netdev_features_change);
1194
1195/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 * netdev_state_change - device changes state
1197 * @dev: device to cause notification
1198 *
1199 * Called to indicate a device has changed state. This function calls
1200 * the notifier chains for netdev_chain and sends a NEWLINK message
1201 * to the routing socket.
1202 */
1203void netdev_state_change(struct net_device *dev)
1204{
1205 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001206 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1208 }
1209}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001210EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Amerigo Wangee89bab2012-08-09 22:14:56 +00001212/**
1213 * netdev_notify_peers - notify network peers about existence of @dev
1214 * @dev: network device
1215 *
1216 * Generate traffic such that interested network peers are aware of
1217 * @dev, such as by generating a gratuitous ARP. This may be used when
1218 * a device wants to inform the rest of the network about some sort of
1219 * reconfiguration such as a failover event or virtual machine
1220 * migration.
1221 */
1222void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001223{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001224 rtnl_lock();
1225 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1226 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001227}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001228EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001229
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230/**
1231 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001232 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 * @name: name of interface
1234 *
1235 * If a network interface is not present and the process has suitable
1236 * privileges this function loads the module. If module loading is not
1237 * available in this kernel then it becomes a nop.
1238 */
1239
Eric W. Biederman881d9662007-09-17 11:56:21 -07001240void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001242 struct net_device *dev;
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001243 int no_module;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Eric Dumazet72c95282009-10-30 07:11:27 +00001245 rcu_read_lock();
1246 dev = dev_get_by_name_rcu(net, name);
1247 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001249 no_module = !dev;
1250 if (no_module && capable(CAP_NET_ADMIN))
1251 no_module = request_module("netdev-%s", name);
1252 if (no_module && capable(CAP_SYS_MODULE)) {
1253 if (!request_module("%s", name))
Vinson Lee7cecb522012-06-27 14:32:07 +00001254 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1255 name);
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001258EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
Patrick McHardybd380812010-02-26 06:34:53 +00001260static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001262 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001263 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001265 ASSERT_RTNL();
1266
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 if (!netif_device_present(dev))
1268 return -ENODEV;
1269
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001270 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1271 ret = notifier_to_errno(ret);
1272 if (ret)
1273 return ret;
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001276
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001277 if (ops->ndo_validate_addr)
1278 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001279
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001280 if (!ret && ops->ndo_open)
1281 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Jeff Garzikbada3392007-10-23 20:19:37 -07001283 if (ret)
1284 clear_bit(__LINK_STATE_START, &dev->state);
1285 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001287 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001288 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001290 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 return ret;
1294}
Patrick McHardybd380812010-02-26 06:34:53 +00001295
1296/**
1297 * dev_open - prepare an interface for use.
1298 * @dev: device to open
1299 *
1300 * Takes a device from down to up state. The device's private open
1301 * function is invoked and then the multicast lists are loaded. Finally
1302 * the device is moved into the up state and a %NETDEV_UP message is
1303 * sent to the netdev notifier chain.
1304 *
1305 * Calling this function on an active interface is a nop. On a failure
1306 * a negative errno code is returned.
1307 */
1308int dev_open(struct net_device *dev)
1309{
1310 int ret;
1311
Patrick McHardybd380812010-02-26 06:34:53 +00001312 if (dev->flags & IFF_UP)
1313 return 0;
1314
Patrick McHardybd380812010-02-26 06:34:53 +00001315 ret = __dev_open(dev);
1316 if (ret < 0)
1317 return ret;
1318
Patrick McHardybd380812010-02-26 06:34:53 +00001319 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1320 call_netdevice_notifiers(NETDEV_UP, dev);
1321
1322 return ret;
1323}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001324EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Octavian Purdila44345722010-12-13 12:44:07 +00001326static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327{
Octavian Purdila44345722010-12-13 12:44:07 +00001328 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001329
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001330 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001331 might_sleep();
1332
Octavian Purdila44345722010-12-13 12:44:07 +00001333 list_for_each_entry(dev, head, unreg_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001334 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Octavian Purdila44345722010-12-13 12:44:07 +00001336 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Octavian Purdila44345722010-12-13 12:44:07 +00001338 /* Synchronize to scheduled poll. We cannot touch poll list, it
1339 * can be even on different cpu. So just clear netif_running().
1340 *
1341 * dev->stop() will invoke napi_disable() on all of it's
1342 * napi_struct instances on this device.
1343 */
1344 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1345 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Octavian Purdila44345722010-12-13 12:44:07 +00001347 dev_deactivate_many(head);
1348
1349 list_for_each_entry(dev, head, unreg_list) {
1350 const struct net_device_ops *ops = dev->netdev_ops;
1351
1352 /*
1353 * Call the device specific close. This cannot fail.
1354 * Only if device is UP
1355 *
1356 * We allow it to be called even after a DETACH hot-plug
1357 * event.
1358 */
1359 if (ops->ndo_stop)
1360 ops->ndo_stop(dev);
1361
Octavian Purdila44345722010-12-13 12:44:07 +00001362 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001363 net_dmaengine_put();
1364 }
1365
1366 return 0;
1367}
1368
1369static int __dev_close(struct net_device *dev)
1370{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001371 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001372 LIST_HEAD(single);
1373
1374 list_add(&dev->unreg_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001375 retval = __dev_close_many(&single);
1376 list_del(&single);
1377 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001378}
1379
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001380static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001381{
1382 struct net_device *dev, *tmp;
1383 LIST_HEAD(tmp_list);
1384
1385 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1386 if (!(dev->flags & IFF_UP))
1387 list_move(&dev->unreg_list, &tmp_list);
1388
1389 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001390
Octavian Purdila44345722010-12-13 12:44:07 +00001391 list_for_each_entry(dev, head, unreg_list) {
1392 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1393 call_netdevice_notifiers(NETDEV_DOWN, dev);
1394 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Octavian Purdila44345722010-12-13 12:44:07 +00001396 /* rollback_registered_many needs the complete original list */
1397 list_splice(&tmp_list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 return 0;
1399}
Patrick McHardybd380812010-02-26 06:34:53 +00001400
1401/**
1402 * dev_close - shutdown an interface.
1403 * @dev: device to shutdown
1404 *
1405 * This function moves an active device into down state. A
1406 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1407 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1408 * chain.
1409 */
1410int dev_close(struct net_device *dev)
1411{
Eric Dumazete14a5992011-05-10 12:26:06 -07001412 if (dev->flags & IFF_UP) {
1413 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001414
Eric Dumazete14a5992011-05-10 12:26:06 -07001415 list_add(&dev->unreg_list, &single);
1416 dev_close_many(&single);
1417 list_del(&single);
1418 }
Patrick McHardybd380812010-02-26 06:34:53 +00001419 return 0;
1420}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001421EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
1423
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001424/**
1425 * dev_disable_lro - disable Large Receive Offload on a device
1426 * @dev: device
1427 *
1428 * Disable Large Receive Offload (LRO) on a net device. Must be
1429 * called under RTNL. This is needed if received packets may be
1430 * forwarded to another interface.
1431 */
1432void dev_disable_lro(struct net_device *dev)
1433{
Neil Hormanf11970e2011-05-24 08:31:09 +00001434 /*
1435 * If we're trying to disable lro on a vlan device
1436 * use the underlying physical device instead
1437 */
1438 if (is_vlan_dev(dev))
1439 dev = vlan_dev_real_dev(dev);
1440
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001441 dev->wanted_features &= ~NETIF_F_LRO;
1442 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001443
Michał Mirosław22d59692011-04-21 12:42:15 +00001444 if (unlikely(dev->features & NETIF_F_LRO))
1445 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001446}
1447EXPORT_SYMBOL(dev_disable_lro);
1448
1449
Eric W. Biederman881d9662007-09-17 11:56:21 -07001450static int dev_boot_phase = 1;
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452/**
1453 * register_netdevice_notifier - register a network notifier block
1454 * @nb: notifier
1455 *
1456 * Register a notifier to be called when network device events occur.
1457 * The notifier passed is linked into the kernel structures and must
1458 * not be reused until it has been unregistered. A negative errno code
1459 * is returned on a failure.
1460 *
1461 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001462 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 * view of the network device list.
1464 */
1465
1466int register_netdevice_notifier(struct notifier_block *nb)
1467{
1468 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001469 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001470 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 int err;
1472
1473 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001474 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001475 if (err)
1476 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001477 if (dev_boot_phase)
1478 goto unlock;
1479 for_each_net(net) {
1480 for_each_netdev(net, dev) {
1481 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1482 err = notifier_to_errno(err);
1483 if (err)
1484 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
Eric W. Biederman881d9662007-09-17 11:56:21 -07001486 if (!(dev->flags & IFF_UP))
1487 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001488
Eric W. Biederman881d9662007-09-17 11:56:21 -07001489 nb->notifier_call(nb, NETDEV_UP, dev);
1490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001492
1493unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 rtnl_unlock();
1495 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001496
1497rollback:
1498 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001499 for_each_net(net) {
1500 for_each_netdev(net, dev) {
1501 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001502 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001503
Eric W. Biederman881d9662007-09-17 11:56:21 -07001504 if (dev->flags & IFF_UP) {
1505 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1506 nb->notifier_call(nb, NETDEV_DOWN, dev);
1507 }
1508 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001509 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001510 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001511
RongQing.Li8f891482011-11-30 23:43:07 -05001512outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001513 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001514 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001516EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
1518/**
1519 * unregister_netdevice_notifier - unregister a network notifier block
1520 * @nb: notifier
1521 *
1522 * Unregister a notifier previously registered by
1523 * register_netdevice_notifier(). The notifier is unlinked into the
1524 * kernel structures and may then be reused. A negative errno code
1525 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001526 *
1527 * After unregistering unregister and down device events are synthesized
1528 * for all devices on the device list to the removed notifier to remove
1529 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 */
1531
1532int unregister_netdevice_notifier(struct notifier_block *nb)
1533{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001534 struct net_device *dev;
1535 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001536 int err;
1537
1538 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001539 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001540 if (err)
1541 goto unlock;
1542
1543 for_each_net(net) {
1544 for_each_netdev(net, dev) {
1545 if (dev->flags & IFF_UP) {
1546 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1547 nb->notifier_call(nb, NETDEV_DOWN, dev);
1548 }
1549 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001550 }
1551 }
1552unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001553 rtnl_unlock();
1554 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001556EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
1558/**
1559 * call_netdevice_notifiers - call all network notifier blocks
1560 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001561 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 *
1563 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001564 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 */
1566
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001567int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568{
Jiri Pirkoab930472010-04-20 01:45:37 -07001569 ASSERT_RTNL();
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001570 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001572EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Ingo Molnarc5905af2012-02-24 08:31:31 +01001574static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001575#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001576/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001577 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001578 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001579 */
1580static atomic_t netstamp_needed_deferred;
1581#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583void net_enable_timestamp(void)
1584{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001585#ifdef HAVE_JUMP_LABEL
1586 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1587
1588 if (deferred) {
1589 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001590 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001591 return;
1592 }
1593#endif
1594 WARN_ON(in_interrupt());
Ingo Molnarc5905af2012-02-24 08:31:31 +01001595 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001597EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599void net_disable_timestamp(void)
1600{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001601#ifdef HAVE_JUMP_LABEL
1602 if (in_interrupt()) {
1603 atomic_inc(&netstamp_needed_deferred);
1604 return;
1605 }
1606#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001607 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001609EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Eric Dumazet3b098e22010-05-15 23:57:10 -07001611static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612{
Eric Dumazet588f0332011-11-15 04:12:55 +00001613 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001614 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001615 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616}
1617
Eric Dumazet588f0332011-11-15 04:12:55 +00001618#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001619 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001620 if ((COND) && !(SKB)->tstamp.tv64) \
1621 __net_timestamp(SKB); \
1622 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001623
Richard Cochran4dc360c2011-10-19 17:00:35 -04001624static int net_hwtstamp_validate(struct ifreq *ifr)
1625{
1626 struct hwtstamp_config cfg;
1627 enum hwtstamp_tx_types tx_type;
1628 enum hwtstamp_rx_filters rx_filter;
1629 int tx_type_valid = 0;
1630 int rx_filter_valid = 0;
1631
1632 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1633 return -EFAULT;
1634
1635 if (cfg.flags) /* reserved for future extensions */
1636 return -EINVAL;
1637
1638 tx_type = cfg.tx_type;
1639 rx_filter = cfg.rx_filter;
1640
1641 switch (tx_type) {
1642 case HWTSTAMP_TX_OFF:
1643 case HWTSTAMP_TX_ON:
1644 case HWTSTAMP_TX_ONESTEP_SYNC:
1645 tx_type_valid = 1;
1646 break;
1647 }
1648
1649 switch (rx_filter) {
1650 case HWTSTAMP_FILTER_NONE:
1651 case HWTSTAMP_FILTER_ALL:
1652 case HWTSTAMP_FILTER_SOME:
1653 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1654 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1655 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1656 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1657 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1658 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1659 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1660 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1661 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1662 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1663 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1664 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1665 rx_filter_valid = 1;
1666 break;
1667 }
1668
1669 if (!tx_type_valid || !rx_filter_valid)
1670 return -ERANGE;
1671
1672 return 0;
1673}
1674
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001675static inline bool is_skb_forwardable(struct net_device *dev,
1676 struct sk_buff *skb)
1677{
1678 unsigned int len;
1679
1680 if (!(dev->flags & IFF_UP))
1681 return false;
1682
1683 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1684 if (skb->len <= len)
1685 return true;
1686
1687 /* if TSO is enabled, we don't care about the length as the packet
1688 * could be forwarded without being segmented before
1689 */
1690 if (skb_is_gso(skb))
1691 return true;
1692
1693 return false;
1694}
1695
Arnd Bergmann44540962009-11-26 06:07:08 +00001696/**
1697 * dev_forward_skb - loopback an skb to another netif
1698 *
1699 * @dev: destination network device
1700 * @skb: buffer to forward
1701 *
1702 * return values:
1703 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001704 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001705 *
1706 * dev_forward_skb can be used for injecting an skb from the
1707 * start_xmit function of one device into the receive queue
1708 * of another device.
1709 *
1710 * The receiving device may be in another namespace, so
1711 * we have to clear all information in the skb that could
1712 * impact namespace isolation.
1713 */
1714int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1715{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001716 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1717 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1718 atomic_long_inc(&dev->rx_dropped);
1719 kfree_skb(skb);
1720 return NET_RX_DROP;
1721 }
1722 }
1723
Arnd Bergmann44540962009-11-26 06:07:08 +00001724 skb_orphan(skb);
Ben Greearc736eef2010-07-22 09:54:47 +00001725 nf_reset(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001726
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001727 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001728 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001729 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001730 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001731 }
Benjamin LaHaise3b9785c2012-03-27 15:55:44 +00001732 skb->skb_iif = 0;
David S. Miller59b99972012-05-10 23:03:34 -04001733 skb->dev = dev;
1734 skb_dst_drop(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001735 skb->tstamp.tv64 = 0;
1736 skb->pkt_type = PACKET_HOST;
1737 skb->protocol = eth_type_trans(skb, dev);
David S. Miller59b99972012-05-10 23:03:34 -04001738 skb->mark = 0;
1739 secpath_reset(skb);
1740 nf_reset(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001741 return netif_rx(skb);
1742}
1743EXPORT_SYMBOL_GPL(dev_forward_skb);
1744
Changli Gao71d9dec2010-12-15 19:57:25 +00001745static inline int deliver_skb(struct sk_buff *skb,
1746 struct packet_type *pt_prev,
1747 struct net_device *orig_dev)
1748{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001749 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1750 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001751 atomic_inc(&skb->users);
1752 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1753}
1754
Eric Leblondc0de08d2012-08-16 22:02:58 +00001755static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1756{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001757 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001758 return false;
1759
1760 if (ptype->id_match)
1761 return ptype->id_match(ptype, skb->sk);
1762 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1763 return true;
1764
1765 return false;
1766}
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768/*
1769 * Support routine. Sends outgoing frames to any network
1770 * taps currently in use.
1771 */
1772
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001773static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774{
1775 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001776 struct sk_buff *skb2 = NULL;
1777 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 rcu_read_lock();
1780 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1781 /* Never send packets back to the socket
1782 * they originated from - MvS (miquels@drinkel.ow.org)
1783 */
1784 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001785 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001786 if (pt_prev) {
1787 deliver_skb(skb2, pt_prev, skb->dev);
1788 pt_prev = ptype;
1789 continue;
1790 }
1791
1792 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 if (!skb2)
1794 break;
1795
Eric Dumazet70978182010-12-20 21:22:51 +00001796 net_timestamp_set(skb2);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 /* skb->nh should be correctly
1799 set by sender, so that the second statement is
1800 just protection against buggy protocols.
1801 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001802 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001804 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001805 skb2->network_header > skb2->tail) {
Joe Perchese87cc472012-05-13 21:56:26 +00001806 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1807 ntohs(skb2->protocol),
1808 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001809 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 }
1811
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001812 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001814 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 }
1816 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001817 if (pt_prev)
1818 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 rcu_read_unlock();
1820}
1821
Ben Hutchings2c530402012-07-10 10:55:09 +00001822/**
1823 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001824 * @dev: Network device
1825 * @txq: number of queues available
1826 *
1827 * If real_num_tx_queues is changed the tc mappings may no longer be
1828 * valid. To resolve this verify the tc mapping remains valid and if
1829 * not NULL the mapping. With no priorities mapping to this
1830 * offset/count pair it will no longer be used. In the worst case TC0
1831 * is invalid nothing can be done so disable priority mappings. If is
1832 * expected that drivers will fix this mapping if they can before
1833 * calling netif_set_real_num_tx_queues.
1834 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001835static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001836{
1837 int i;
1838 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1839
1840 /* If TC0 is invalidated disable TC mapping */
1841 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001842 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001843 dev->num_tc = 0;
1844 return;
1845 }
1846
1847 /* Invalidated prio to tc mappings set to TC0 */
1848 for (i = 1; i < TC_BITMASK + 1; i++) {
1849 int q = netdev_get_prio_tc_map(dev, i);
1850
1851 tc = &dev->tc_to_txq[q];
1852 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001853 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1854 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001855 netdev_set_prio_tc_map(dev, i, 0);
1856 }
1857 }
1858}
1859
Alexander Duyck537c00d2013-01-10 08:57:02 +00001860#ifdef CONFIG_XPS
1861static DEFINE_MUTEX(xps_map_mutex);
1862#define xmap_dereference(P) \
1863 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1864
1865void netif_reset_xps_queue(struct net_device *dev, u16 index)
1866{
1867 struct xps_dev_maps *dev_maps;
1868 struct xps_map *map;
1869 int i, pos, nonempty = 0;
1870
1871 mutex_lock(&xps_map_mutex);
1872 dev_maps = xmap_dereference(dev->xps_maps);
1873
1874 if (!dev_maps)
1875 goto out_no_maps;
1876
1877 for_each_possible_cpu(i) {
1878 map = xmap_dereference(dev_maps->cpu_map[i]);
1879 if (!map)
1880 continue;
1881
1882 for (pos = 0; pos < map->len; pos++)
1883 if (map->queues[pos] == index)
1884 break;
1885
1886 if (pos < map->len) {
1887 if (map->len > 1) {
1888 map->queues[pos] = map->queues[--map->len];
1889 } else {
1890 RCU_INIT_POINTER(dev_maps->cpu_map[i], NULL);
1891 kfree_rcu(map, rcu);
1892 map = NULL;
1893 }
1894 }
1895 if (map)
1896 nonempty = 1;
1897 }
1898
1899 if (!nonempty) {
1900 RCU_INIT_POINTER(dev->xps_maps, NULL);
1901 kfree_rcu(dev_maps, rcu);
1902 }
1903
1904out_no_maps:
1905 mutex_unlock(&xps_map_mutex);
1906}
1907
1908int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1909{
1910 int i, cpu, pos, map_len, alloc_len, need_set;
1911 struct xps_map *map, *new_map;
1912 struct xps_dev_maps *dev_maps, *new_dev_maps;
1913 int nonempty = 0;
1914 int numa_node_id = -2;
1915 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1916
1917 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1918 if (!new_dev_maps)
1919 return -ENOMEM;
1920
1921 mutex_lock(&xps_map_mutex);
1922
1923 dev_maps = xmap_dereference(dev->xps_maps);
1924
1925 for_each_possible_cpu(cpu) {
1926 map = dev_maps ?
1927 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1928 new_map = map;
1929 if (map) {
1930 for (pos = 0; pos < map->len; pos++)
1931 if (map->queues[pos] == index)
1932 break;
1933 map_len = map->len;
1934 alloc_len = map->alloc_len;
1935 } else
1936 pos = map_len = alloc_len = 0;
1937
1938 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
1939#ifdef CONFIG_NUMA
1940 if (need_set) {
1941 if (numa_node_id == -2)
1942 numa_node_id = cpu_to_node(cpu);
1943 else if (numa_node_id != cpu_to_node(cpu))
1944 numa_node_id = -1;
1945 }
1946#endif
1947 if (need_set && pos >= map_len) {
1948 /* Need to add queue to this CPU's map */
1949 if (map_len >= alloc_len) {
1950 alloc_len = alloc_len ?
1951 2 * alloc_len : XPS_MIN_MAP_ALLOC;
1952 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
1953 GFP_KERNEL,
1954 cpu_to_node(cpu));
1955 if (!new_map)
1956 goto error;
1957 new_map->alloc_len = alloc_len;
1958 for (i = 0; i < map_len; i++)
1959 new_map->queues[i] = map->queues[i];
1960 new_map->len = map_len;
1961 }
1962 new_map->queues[new_map->len++] = index;
1963 } else if (!need_set && pos < map_len) {
1964 /* Need to remove queue from this CPU's map */
1965 if (map_len > 1)
1966 new_map->queues[pos] =
1967 new_map->queues[--new_map->len];
1968 else
1969 new_map = NULL;
1970 }
1971 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
1972 }
1973
1974 /* Cleanup old maps */
1975 for_each_possible_cpu(cpu) {
1976 map = dev_maps ?
1977 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1978 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
1979 kfree_rcu(map, rcu);
1980 if (new_dev_maps->cpu_map[cpu])
1981 nonempty = 1;
1982 }
1983
1984 if (nonempty) {
1985 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1986 } else {
1987 kfree(new_dev_maps);
1988 RCU_INIT_POINTER(dev->xps_maps, NULL);
1989 }
1990
1991 if (dev_maps)
1992 kfree_rcu(dev_maps, rcu);
1993
1994 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1995 (numa_node_id >= 0) ? numa_node_id :
1996 NUMA_NO_NODE);
1997
1998 mutex_unlock(&xps_map_mutex);
1999
2000 return 0;
2001error:
2002 mutex_unlock(&xps_map_mutex);
2003
2004 if (new_dev_maps)
2005 for_each_possible_cpu(i)
2006 kfree(rcu_dereference_protected(
2007 new_dev_maps->cpu_map[i],
2008 1));
2009 kfree(new_dev_maps);
2010 return -ENOMEM;
2011}
2012EXPORT_SYMBOL(netif_set_xps_queue);
2013
2014#endif
John Fastabendf0796d52010-07-01 13:21:57 +00002015/*
2016 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2017 * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
2018 */
Tom Herberte6484932010-10-18 18:04:39 +00002019int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002020{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002021 int rc;
2022
Tom Herberte6484932010-10-18 18:04:39 +00002023 if (txq < 1 || txq > dev->num_tx_queues)
2024 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002025
Ben Hutchings5c565802011-02-15 19:39:21 +00002026 if (dev->reg_state == NETREG_REGISTERED ||
2027 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002028 ASSERT_RTNL();
2029
Tom Herbert1d24eb42010-11-21 13:17:27 +00002030 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2031 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002032 if (rc)
2033 return rc;
2034
John Fastabend4f57c082011-01-17 08:06:04 +00002035 if (dev->num_tc)
2036 netif_setup_tc(dev, txq);
2037
Tom Herberte6484932010-10-18 18:04:39 +00002038 if (txq < dev->real_num_tx_queues)
2039 qdisc_reset_all_tx_gt(dev, txq);
John Fastabendf0796d52010-07-01 13:21:57 +00002040 }
Tom Herberte6484932010-10-18 18:04:39 +00002041
2042 dev->real_num_tx_queues = txq;
2043 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002044}
2045EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002046
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002047#ifdef CONFIG_RPS
2048/**
2049 * netif_set_real_num_rx_queues - set actual number of RX queues used
2050 * @dev: Network device
2051 * @rxq: Actual number of RX queues
2052 *
2053 * This must be called either with the rtnl_lock held or before
2054 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002055 * negative error code. If called before registration, it always
2056 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002057 */
2058int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2059{
2060 int rc;
2061
Tom Herbertbd25fa72010-10-18 18:00:16 +00002062 if (rxq < 1 || rxq > dev->num_rx_queues)
2063 return -EINVAL;
2064
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002065 if (dev->reg_state == NETREG_REGISTERED) {
2066 ASSERT_RTNL();
2067
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002068 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2069 rxq);
2070 if (rc)
2071 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002072 }
2073
2074 dev->real_num_rx_queues = rxq;
2075 return 0;
2076}
2077EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2078#endif
2079
Ben Hutchings2c530402012-07-10 10:55:09 +00002080/**
2081 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002082 *
2083 * This routine should set an upper limit on the number of RSS queues
2084 * used by default by multiqueue devices.
2085 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002086int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002087{
2088 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2089}
2090EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2091
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002092static inline void __netif_reschedule(struct Qdisc *q)
2093{
2094 struct softnet_data *sd;
2095 unsigned long flags;
2096
2097 local_irq_save(flags);
2098 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002099 q->next_sched = NULL;
2100 *sd->output_queue_tailp = q;
2101 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002102 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2103 local_irq_restore(flags);
2104}
2105
David S. Miller37437bb2008-07-16 02:15:04 -07002106void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002107{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002108 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2109 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002110}
2111EXPORT_SYMBOL(__netif_schedule);
2112
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002113void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002114{
David S. Miller3578b0c2010-08-03 00:24:04 -07002115 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002116 struct softnet_data *sd;
2117 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002118
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002119 local_irq_save(flags);
2120 sd = &__get_cpu_var(softnet_data);
2121 skb->next = sd->completion_queue;
2122 sd->completion_queue = skb;
2123 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2124 local_irq_restore(flags);
2125 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002126}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002127EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002128
2129void dev_kfree_skb_any(struct sk_buff *skb)
2130{
2131 if (in_irq() || irqs_disabled())
2132 dev_kfree_skb_irq(skb);
2133 else
2134 dev_kfree_skb(skb);
2135}
2136EXPORT_SYMBOL(dev_kfree_skb_any);
2137
2138
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002139/**
2140 * netif_device_detach - mark device as removed
2141 * @dev: network device
2142 *
2143 * Mark device as removed from system and therefore no longer available.
2144 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002145void netif_device_detach(struct net_device *dev)
2146{
2147 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2148 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002149 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002150 }
2151}
2152EXPORT_SYMBOL(netif_device_detach);
2153
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002154/**
2155 * netif_device_attach - mark device as attached
2156 * @dev: network device
2157 *
2158 * Mark device as attached from system and restart if needed.
2159 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002160void netif_device_attach(struct net_device *dev)
2161{
2162 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2163 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002164 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002165 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002166 }
2167}
2168EXPORT_SYMBOL(netif_device_attach);
2169
Ben Hutchings36c92472012-01-17 07:57:56 +00002170static void skb_warn_bad_offload(const struct sk_buff *skb)
2171{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002172 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002173 struct net_device *dev = skb->dev;
2174 const char *driver = "";
2175
2176 if (dev && dev->dev.parent)
2177 driver = dev_driver_string(dev->dev.parent);
2178
2179 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2180 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002181 driver, dev ? &dev->features : &null_features,
2182 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002183 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2184 skb_shinfo(skb)->gso_type, skb->ip_summed);
2185}
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187/*
2188 * Invalidate hardware checksum when packet is to be mangled, and
2189 * complete checksum manually on outgoing path.
2190 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002191int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192{
Al Virod3bc23e2006-11-14 21:24:49 -08002193 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002194 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
Patrick McHardy84fa7932006-08-29 16:44:56 -07002196 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002197 goto out_set_summed;
2198
2199 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002200 skb_warn_bad_offload(skb);
2201 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 }
2203
Michał Mirosław55508d62010-12-14 15:24:08 +00002204 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002205 BUG_ON(offset >= skb_headlen(skb));
2206 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2207
2208 offset += skb->csum_offset;
2209 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2210
2211 if (skb_cloned(skb) &&
2212 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2214 if (ret)
2215 goto out;
2216 }
2217
Herbert Xua0308472007-10-15 01:47:15 -07002218 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002219out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002221out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 return ret;
2223}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002224EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002226/**
2227 * skb_gso_segment - Perform segmentation on skb.
2228 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002229 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002230 *
2231 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002232 *
2233 * It may return NULL if the skb requires no segmentation. This is
2234 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002235 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002236struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2237 netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002238{
2239 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
Vlad Yasevich22061d82012-11-15 08:49:11 +00002240 struct packet_offload *ptype;
Al Viro252e3342006-11-14 20:48:11 -08002241 __be16 type = skb->protocol;
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002242 int vlan_depth = ETH_HLEN;
Herbert Xua430a432006-07-08 13:34:56 -07002243 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002244
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002245 while (type == htons(ETH_P_8021Q)) {
2246 struct vlan_hdr *vh;
Jesse Gross7b9c6092010-10-20 13:56:04 +00002247
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002248 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
Jesse Gross7b9c6092010-10-20 13:56:04 +00002249 return ERR_PTR(-EINVAL);
2250
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002251 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2252 type = vh->h_vlan_encapsulated_proto;
2253 vlan_depth += VLAN_HLEN;
Jesse Gross7b9c6092010-10-20 13:56:04 +00002254 }
2255
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07002256 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002257 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002258 __skb_pull(skb, skb->mac_len);
2259
Herbert Xu67fd1a72009-01-19 16:26:44 -08002260 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002261 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002262
Herbert Xua430a432006-07-08 13:34:56 -07002263 if (skb_header_cloned(skb) &&
2264 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2265 return ERR_PTR(err);
2266 }
2267
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002268 rcu_read_lock();
Vlad Yasevich22061d82012-11-15 08:49:11 +00002269 list_for_each_entry_rcu(ptype, &offload_base, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002270 if (ptype->type == type && ptype->callbacks.gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07002271 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002272 err = ptype->callbacks.gso_send_check(skb);
Herbert Xua430a432006-07-08 13:34:56 -07002273 segs = ERR_PTR(err);
2274 if (err || skb_gso_ok(skb, features))
2275 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07002276 __skb_push(skb, (skb->data -
2277 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07002278 }
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002279 segs = ptype->callbacks.gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002280 break;
2281 }
2282 }
2283 rcu_read_unlock();
2284
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07002285 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07002286
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002287 return segs;
2288}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002289EXPORT_SYMBOL(skb_gso_segment);
2290
Herbert Xufb286bb2005-11-10 13:01:24 -08002291/* Take action when hardware reception checksum errors are detected. */
2292#ifdef CONFIG_BUG
2293void netdev_rx_csum_fault(struct net_device *dev)
2294{
2295 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002296 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002297 dump_stack();
2298 }
2299}
2300EXPORT_SYMBOL(netdev_rx_csum_fault);
2301#endif
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303/* Actually, we should eliminate this check as soon as we know, that:
2304 * 1. IOMMU is present and allows to map all the memory.
2305 * 2. No high memory really exists on this machine.
2306 */
2307
Eric Dumazet9092c652010-04-02 13:34:49 -07002308static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002310#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002312 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002313 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2314 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2315 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002316 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002317 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002318 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002320 if (PCI_DMA_BUS_IS_PHYS) {
2321 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322
Eric Dumazet9092c652010-04-02 13:34:49 -07002323 if (!pdev)
2324 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002326 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2327 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002328 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2329 return 1;
2330 }
2331 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002332#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 return 0;
2334}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002336struct dev_gso_cb {
2337 void (*destructor)(struct sk_buff *skb);
2338};
2339
2340#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2341
2342static void dev_gso_skb_destructor(struct sk_buff *skb)
2343{
2344 struct dev_gso_cb *cb;
2345
2346 do {
2347 struct sk_buff *nskb = skb->next;
2348
2349 skb->next = nskb->next;
2350 nskb->next = NULL;
2351 kfree_skb(nskb);
2352 } while (skb->next);
2353
2354 cb = DEV_GSO_CB(skb);
2355 if (cb->destructor)
2356 cb->destructor(skb);
2357}
2358
2359/**
2360 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2361 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002362 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002363 *
2364 * This function segments the given skb and stores the list of segments
2365 * in skb->next.
2366 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002367static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002368{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002369 struct sk_buff *segs;
2370
Herbert Xu576a30e2006-06-27 13:22:38 -07002371 segs = skb_gso_segment(skb, features);
2372
2373 /* Verifying header integrity only. */
2374 if (!segs)
2375 return 0;
2376
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002377 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002378 return PTR_ERR(segs);
2379
2380 skb->next = segs;
2381 DEV_GSO_CB(skb)->destructor = skb->destructor;
2382 skb->destructor = dev_gso_skb_destructor;
2383
2384 return 0;
2385}
2386
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002387static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
Jesse Gross03634662011-01-09 06:23:35 +00002388{
2389 return ((features & NETIF_F_GEN_CSUM) ||
2390 ((features & NETIF_F_V4_CSUM) &&
2391 protocol == htons(ETH_P_IP)) ||
2392 ((features & NETIF_F_V6_CSUM) &&
2393 protocol == htons(ETH_P_IPV6)) ||
2394 ((features & NETIF_F_FCOE_CRC) &&
2395 protocol == htons(ETH_P_FCOE)));
2396}
2397
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002398static netdev_features_t harmonize_features(struct sk_buff *skb,
2399 __be16 protocol, netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002400{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002401 if (skb->ip_summed != CHECKSUM_NONE &&
2402 !can_checksum_protocol(features, protocol)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002403 features &= ~NETIF_F_ALL_CSUM;
2404 features &= ~NETIF_F_SG;
2405 } else if (illegal_highdma(skb->dev, skb)) {
2406 features &= ~NETIF_F_SG;
2407 }
2408
2409 return features;
2410}
2411
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002412netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002413{
2414 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002415 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002416
Ben Hutchings30b678d2012-07-30 15:57:00 +00002417 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2418 features &= ~NETIF_F_GSO_MASK;
2419
Jesse Gross58e998c2010-10-29 12:14:55 +00002420 if (protocol == htons(ETH_P_8021Q)) {
2421 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2422 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002423 } else if (!vlan_tx_tag_present(skb)) {
2424 return harmonize_features(skb, protocol, features);
2425 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002426
Jesse Gross6ee400a2011-01-17 20:46:00 +00002427 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002428
2429 if (protocol != htons(ETH_P_8021Q)) {
2430 return harmonize_features(skb, protocol, features);
2431 } else {
2432 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Jesse Gross6ee400a2011-01-17 20:46:00 +00002433 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
Jesse Grossf01a5232011-01-09 06:23:31 +00002434 return harmonize_features(skb, protocol, features);
2435 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002436}
Jesse Grossf01a5232011-01-09 06:23:31 +00002437EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002438
John Fastabend6afff0c2010-06-16 14:18:12 +00002439/*
2440 * Returns true if either:
2441 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002442 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002443 */
2444static inline int skb_needs_linearize(struct sk_buff *skb,
Jesse Gross02932ce2011-01-09 06:23:34 +00002445 int features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002446{
Jesse Gross02932ce2011-01-09 06:23:34 +00002447 return skb_is_nonlinear(skb) &&
2448 ((skb_has_frag_list(skb) &&
2449 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002450 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002451 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002452}
2453
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002454int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2455 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002456{
Stephen Hemminger00829822008-11-20 20:14:53 -08002457 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002458 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002459 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002460
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002461 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002462 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002463
Eric Dumazet93f154b2009-05-18 22:19:19 -07002464 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002465 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002466 * its hot in this cpu cache
2467 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002468 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2469 skb_dst_drop(skb);
2470
Jesse Grossfc741212011-01-09 06:23:32 +00002471 features = netif_skb_features(skb);
2472
Jesse Gross7b9c6092010-10-20 13:56:04 +00002473 if (vlan_tx_tag_present(skb) &&
Jesse Grossfc741212011-01-09 06:23:32 +00002474 !(features & NETIF_F_HW_VLAN_TX)) {
Jesse Gross7b9c6092010-10-20 13:56:04 +00002475 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2476 if (unlikely(!skb))
2477 goto out;
2478
2479 skb->vlan_tci = 0;
2480 }
2481
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002482 /* If encapsulation offload request, verify we are testing
2483 * hardware encapsulation features instead of standard
2484 * features for the netdev
2485 */
2486 if (skb->encapsulation)
2487 features &= dev->hw_enc_features;
2488
Jesse Grossfc741212011-01-09 06:23:32 +00002489 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002490 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002491 goto out_kfree_skb;
2492 if (skb->next)
2493 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002494 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002495 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002496 __skb_linearize(skb))
2497 goto out_kfree_skb;
2498
2499 /* If packet is not checksummed and device does not
2500 * support checksumming for this protocol, complete
2501 * checksumming here.
2502 */
2503 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002504 if (skb->encapsulation)
2505 skb_set_inner_transport_header(skb,
2506 skb_checksum_start_offset(skb));
2507 else
2508 skb_set_transport_header(skb,
2509 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002510 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002511 skb_checksum_help(skb))
2512 goto out_kfree_skb;
2513 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002514 }
2515
Eric Dumazetb40863c2012-09-18 20:44:49 +00002516 if (!list_empty(&ptype_all))
2517 dev_queue_xmit_nit(skb, dev);
2518
Koki Sanagiec764bf2011-05-30 21:48:34 +00002519 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002520 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002521 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002522 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002523 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002524 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002525 }
2526
Herbert Xu576a30e2006-06-27 13:22:38 -07002527gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002528 do {
2529 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002530
2531 skb->next = nskb->next;
2532 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002533
2534 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002535 * If device doesn't need nskb->dst, release it right now while
Krishna Kumar068a2de2009-12-09 20:59:58 +00002536 * its hot in this cpu cache
2537 */
2538 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2539 skb_dst_drop(nskb);
2540
Eric Dumazetb40863c2012-09-18 20:44:49 +00002541 if (!list_empty(&ptype_all))
2542 dev_queue_xmit_nit(nskb, dev);
2543
Koki Sanagiec764bf2011-05-30 21:48:34 +00002544 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002545 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002546 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002547 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002548 if (rc & ~NETDEV_TX_MASK)
2549 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002550 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002551 skb->next = nskb;
2552 return rc;
2553 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002554 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002555 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002556 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002557 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002558
Patrick McHardy572a9d72009-11-10 06:14:14 +00002559out_kfree_gso_skb:
2560 if (likely(skb->next == NULL))
2561 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002562out_kfree_skb:
2563 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002564out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002565 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002566}
2567
Tom Herbert0a9627f2010-03-16 08:03:29 +00002568static u32 hashrnd __read_mostly;
David S. Millerb6b2fed2008-07-21 09:48:06 -07002569
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002570/*
2571 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2572 * to be used as a distribution range.
2573 */
2574u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2575 unsigned int num_tx_queues)
David S. Miller8f0f2222008-07-15 03:47:03 -07002576{
David S. Miller70192982009-01-27 16:34:47 -08002577 u32 hash;
John Fastabend4f57c082011-01-17 08:06:04 +00002578 u16 qoffset = 0;
2579 u16 qcount = num_tx_queues;
David S. Millerb6b2fed2008-07-21 09:48:06 -07002580
David S. Miller513de112009-05-03 14:43:10 -07002581 if (skb_rx_queue_recorded(skb)) {
2582 hash = skb_get_rx_queue(skb);
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002583 while (unlikely(hash >= num_tx_queues))
2584 hash -= num_tx_queues;
David S. Miller513de112009-05-03 14:43:10 -07002585 return hash;
2586 }
Eric Dumazetec581f62009-05-01 09:05:06 -07002587
John Fastabend4f57c082011-01-17 08:06:04 +00002588 if (dev->num_tc) {
2589 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2590 qoffset = dev->tc_to_txq[tc].offset;
2591 qcount = dev->tc_to_txq[tc].count;
2592 }
2593
Eric Dumazetec581f62009-05-01 09:05:06 -07002594 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08002595 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07002596 else
Eric Dumazet62b1a8a2012-06-14 06:42:44 +00002597 hash = (__force u16) skb->protocol;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002598 hash = jhash_1word(hash, hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08002599
John Fastabend4f57c082011-01-17 08:06:04 +00002600 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
David S. Miller8f0f2222008-07-15 03:47:03 -07002601}
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002602EXPORT_SYMBOL(__skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07002603
Eric Dumazeted046422009-11-13 21:54:04 +00002604static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2605{
2606 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
Joe Perchese87cc472012-05-13 21:56:26 +00002607 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2608 dev->name, queue_index,
2609 dev->real_num_tx_queues);
Eric Dumazeted046422009-11-13 21:54:04 +00002610 return 0;
2611 }
2612 return queue_index;
2613}
2614
Tom Herbert1d24eb42010-11-21 13:17:27 +00002615static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2616{
Tom Herbertbf264142010-11-26 08:36:09 +00002617#ifdef CONFIG_XPS
Tom Herbert1d24eb42010-11-21 13:17:27 +00002618 struct xps_dev_maps *dev_maps;
2619 struct xps_map *map;
2620 int queue_index = -1;
2621
2622 rcu_read_lock();
2623 dev_maps = rcu_dereference(dev->xps_maps);
2624 if (dev_maps) {
2625 map = rcu_dereference(
2626 dev_maps->cpu_map[raw_smp_processor_id()]);
2627 if (map) {
2628 if (map->len == 1)
2629 queue_index = map->queues[0];
2630 else {
2631 u32 hash;
2632 if (skb->sk && skb->sk->sk_hash)
2633 hash = skb->sk->sk_hash;
2634 else
2635 hash = (__force u16) skb->protocol ^
2636 skb->rxhash;
2637 hash = jhash_1word(hash, hashrnd);
2638 queue_index = map->queues[
2639 ((u64)hash * map->len) >> 32];
2640 }
2641 if (unlikely(queue_index >= dev->real_num_tx_queues))
2642 queue_index = -1;
2643 }
2644 }
2645 rcu_read_unlock();
2646
2647 return queue_index;
2648#else
2649 return -1;
2650#endif
2651}
2652
Alexander Duyck416186f2013-01-10 08:56:51 +00002653u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2654{
2655 struct sock *sk = skb->sk;
2656 int queue_index = sk_tx_queue_get(sk);
2657
2658 if (queue_index < 0 || skb->ooo_okay ||
2659 queue_index >= dev->real_num_tx_queues) {
2660 int new_index = get_xps_queue(dev, skb);
2661 if (new_index < 0)
2662 new_index = skb_tx_hash(dev, skb);
2663
2664 if (queue_index != new_index && sk) {
2665 struct dst_entry *dst =
2666 rcu_dereference_check(sk->sk_dst_cache, 1);
2667
2668 if (dst && skb_dst(skb) == dst)
2669 sk_tx_queue_set(sk, queue_index);
2670
2671 }
2672
2673 queue_index = new_index;
2674 }
2675
2676 return queue_index;
2677}
2678
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002679struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2680 struct sk_buff *skb)
David S. Millere8a04642008-07-17 00:34:19 -07002681{
Alexander Duyck416186f2013-01-10 08:56:51 +00002682 int queue_index = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002683
Alexander Duyck416186f2013-01-10 08:56:51 +00002684 if (dev->real_num_tx_queues != 1) {
2685 const struct net_device_ops *ops = dev->netdev_ops;
2686 if (ops->ndo_select_queue)
2687 queue_index = ops->ndo_select_queue(dev, skb);
2688 else
2689 queue_index = __netdev_pick_tx(dev, skb);
Helmut Schaadeabc772010-09-03 02:39:56 +00002690 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002691 }
David S. Millereae792b2008-07-15 03:03:33 -07002692
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002693 skb_set_queue_mapping(skb, queue_index);
2694 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07002695}
2696
Eric Dumazet1def9232013-01-10 12:36:42 +00002697static void qdisc_pkt_len_init(struct sk_buff *skb)
2698{
2699 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2700
2701 qdisc_skb_cb(skb)->pkt_len = skb->len;
2702
2703 /* To get more precise estimation of bytes sent on wire,
2704 * we add to pkt_len the headers size of all segments
2705 */
2706 if (shinfo->gso_size) {
2707 unsigned int hdr_len = skb_transport_offset(skb);
2708
2709 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2710 hdr_len += tcp_hdrlen(skb);
2711 else
2712 hdr_len += sizeof(struct udphdr);
2713 qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
2714 }
2715}
2716
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002717static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2718 struct net_device *dev,
2719 struct netdev_queue *txq)
2720{
2721 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002722 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002723 int rc;
2724
Eric Dumazet1def9232013-01-10 12:36:42 +00002725 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002726 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002727 /*
2728 * Heuristic to force contended enqueues to serialize on a
2729 * separate lock before trying to get qdisc main lock.
2730 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2731 * and dequeue packets faster.
2732 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002733 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002734 if (unlikely(contended))
2735 spin_lock(&q->busylock);
2736
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002737 spin_lock(root_lock);
2738 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2739 kfree_skb(skb);
2740 rc = NET_XMIT_DROP;
2741 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002742 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002743 /*
2744 * This is a work-conserving queue; there are no old skbs
2745 * waiting to be sent out; and the qdisc is not running -
2746 * xmit the skb directly.
2747 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002748 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2749 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002750
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002751 qdisc_bstats_update(q, skb);
2752
Eric Dumazet79640a42010-06-02 05:09:29 -07002753 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2754 if (unlikely(contended)) {
2755 spin_unlock(&q->busylock);
2756 contended = false;
2757 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002758 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002759 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002760 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002761
2762 rc = NET_XMIT_SUCCESS;
2763 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002764 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002765 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002766 if (qdisc_run_begin(q)) {
2767 if (unlikely(contended)) {
2768 spin_unlock(&q->busylock);
2769 contended = false;
2770 }
2771 __qdisc_run(q);
2772 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002773 }
2774 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002775 if (unlikely(contended))
2776 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002777 return rc;
2778}
2779
Neil Horman5bc14212011-11-22 05:10:51 +00002780#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2781static void skb_update_prio(struct sk_buff *skb)
2782{
Igor Maravic6977a792011-11-25 07:44:54 +00002783 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002784
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002785 if (!skb->priority && skb->sk && map) {
2786 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2787
2788 if (prioidx < map->priomap_len)
2789 skb->priority = map->priomap[prioidx];
2790 }
Neil Horman5bc14212011-11-22 05:10:51 +00002791}
2792#else
2793#define skb_update_prio(skb)
2794#endif
2795
Eric Dumazet745e20f2010-09-29 13:23:09 -07002796static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002797#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002798
Dave Jonesd29f7492008-07-22 14:09:06 -07002799/**
Michel Machado95603e22012-06-12 10:16:35 +00002800 * dev_loopback_xmit - loop back @skb
2801 * @skb: buffer to transmit
2802 */
2803int dev_loopback_xmit(struct sk_buff *skb)
2804{
2805 skb_reset_mac_header(skb);
2806 __skb_pull(skb, skb_network_offset(skb));
2807 skb->pkt_type = PACKET_LOOPBACK;
2808 skb->ip_summed = CHECKSUM_UNNECESSARY;
2809 WARN_ON(!skb_dst(skb));
2810 skb_dst_force(skb);
2811 netif_rx_ni(skb);
2812 return 0;
2813}
2814EXPORT_SYMBOL(dev_loopback_xmit);
2815
2816/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002817 * dev_queue_xmit - transmit a buffer
2818 * @skb: buffer to transmit
2819 *
2820 * Queue a buffer for transmission to a network device. The caller must
2821 * have set the device and priority and built the buffer before calling
2822 * this function. The function can be called from an interrupt.
2823 *
2824 * A negative errno code is returned on a failure. A success does not
2825 * guarantee the frame will be transmitted as it may be dropped due
2826 * to congestion or traffic shaping.
2827 *
2828 * -----------------------------------------------------------------------------------
2829 * I notice this method can also return errors from the queue disciplines,
2830 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2831 * be positive.
2832 *
2833 * Regardless of the return value, the skb is consumed, so it is currently
2834 * difficult to retry a send to this method. (You can bump the ref count
2835 * before sending to hold a reference for retry if you are careful.)
2836 *
2837 * When calling this method, interrupts MUST be enabled. This is because
2838 * the BH enable code must have IRQs enabled so that it will not deadlock.
2839 * --BLG
2840 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841int dev_queue_xmit(struct sk_buff *skb)
2842{
2843 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002844 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 struct Qdisc *q;
2846 int rc = -ENOMEM;
2847
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002848 /* Disable soft irqs for various locks below. Also
2849 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002851 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
Neil Horman5bc14212011-11-22 05:10:51 +00002853 skb_update_prio(skb);
2854
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002855 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002856 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002859 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002861 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002863 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002864 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 }
2866
2867 /* The device has no queue. Common case for software devices:
2868 loopback, all the sorts of tunnels...
2869
Herbert Xu932ff272006-06-09 12:20:56 -07002870 Really, it is unlikely that netif_tx_lock protection is necessary
2871 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 counters.)
2873 However, it is possible, that they rely on protection
2874 made by us here.
2875
2876 Check this and shot the lock. It is not prone from deadlocks.
2877 Either shot noqueue qdisc, it is even simpler 8)
2878 */
2879 if (dev->flags & IFF_UP) {
2880 int cpu = smp_processor_id(); /* ok because BHs are off */
2881
David S. Millerc773e842008-07-08 23:13:53 -07002882 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883
Eric Dumazet745e20f2010-09-29 13:23:09 -07002884 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2885 goto recursion_alert;
2886
David S. Millerc773e842008-07-08 23:13:53 -07002887 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888
Tom Herbert734664982011-11-28 16:32:44 +00002889 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002890 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002891 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002892 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002893 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002894 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 goto out;
2896 }
2897 }
David S. Millerc773e842008-07-08 23:13:53 -07002898 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002899 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2900 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 } else {
2902 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002903 * unfortunately
2904 */
2905recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002906 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2907 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 }
2909 }
2910
2911 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002912 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 kfree_skb(skb);
2915 return rc;
2916out:
Herbert Xud4828d82006-06-22 02:28:18 -07002917 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 return rc;
2919}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002920EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
2922
2923/*=======================================================================
2924 Receiver routines
2925 =======================================================================*/
2926
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002927int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002928EXPORT_SYMBOL(netdev_max_backlog);
2929
Eric Dumazet3b098e22010-05-15 23:57:10 -07002930int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002931int netdev_budget __read_mostly = 300;
2932int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002934/* Called with irq disabled */
2935static inline void ____napi_schedule(struct softnet_data *sd,
2936 struct napi_struct *napi)
2937{
2938 list_add_tail(&napi->poll_list, &sd->poll_list);
2939 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2940}
2941
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002942/*
2943 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
Tom Herbertbdeab992011-08-14 19:45:55 +00002944 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2945 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
2946 * if hash is a canonical 4-tuple hash over transport ports.
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002947 */
Tom Herbertbdeab992011-08-14 19:45:55 +00002948void __skb_get_rxhash(struct sk_buff *skb)
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002949{
Eric Dumazet4504b862011-11-28 05:23:23 +00002950 struct flow_keys keys;
2951 u32 hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002952
Eric Dumazet4504b862011-11-28 05:23:23 +00002953 if (!skb_flow_dissect(skb, &keys))
2954 return;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002955
Chema Gonzalez68622342012-09-07 13:40:50 +00002956 if (keys.ports)
Eric Dumazet4504b862011-11-28 05:23:23 +00002957 skb->l4_rxhash = 1;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002958
2959 /* get a consistent hash (same value on both flow directions) */
Chema Gonzalez68622342012-09-07 13:40:50 +00002960 if (((__force u32)keys.dst < (__force u32)keys.src) ||
2961 (((__force u32)keys.dst == (__force u32)keys.src) &&
2962 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
Eric Dumazet4504b862011-11-28 05:23:23 +00002963 swap(keys.dst, keys.src);
Chema Gonzalez68622342012-09-07 13:40:50 +00002964 swap(keys.port16[0], keys.port16[1]);
2965 }
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002966
Eric Dumazet4504b862011-11-28 05:23:23 +00002967 hash = jhash_3words((__force u32)keys.dst,
2968 (__force u32)keys.src,
2969 (__force u32)keys.ports, hashrnd);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002970 if (!hash)
2971 hash = 1;
2972
Tom Herbertbdeab992011-08-14 19:45:55 +00002973 skb->rxhash = hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002974}
2975EXPORT_SYMBOL(__skb_get_rxhash);
2976
Eric Dumazetdf334542010-03-24 19:13:54 +00002977#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002978
2979/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002980struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002981EXPORT_SYMBOL(rps_sock_flow_table);
2982
Ingo Molnarc5905af2012-02-24 08:31:31 +01002983struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002984
Ben Hutchingsc4454772011-01-19 11:03:53 +00002985static struct rps_dev_flow *
2986set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2987 struct rps_dev_flow *rflow, u16 next_cpu)
2988{
Ben Hutchings09994d12011-10-03 04:42:46 +00002989 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002990#ifdef CONFIG_RFS_ACCEL
2991 struct netdev_rx_queue *rxqueue;
2992 struct rps_dev_flow_table *flow_table;
2993 struct rps_dev_flow *old_rflow;
2994 u32 flow_id;
2995 u16 rxq_index;
2996 int rc;
2997
2998 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002999 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3000 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003001 goto out;
3002 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3003 if (rxq_index == skb_get_rx_queue(skb))
3004 goto out;
3005
3006 rxqueue = dev->_rx + rxq_index;
3007 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3008 if (!flow_table)
3009 goto out;
3010 flow_id = skb->rxhash & flow_table->mask;
3011 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3012 rxq_index, flow_id);
3013 if (rc < 0)
3014 goto out;
3015 old_rflow = rflow;
3016 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003017 rflow->filter = rc;
3018 if (old_rflow->filter == rflow->filter)
3019 old_rflow->filter = RPS_NO_FILTER;
3020 out:
3021#endif
3022 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003023 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003024 }
3025
Ben Hutchings09994d12011-10-03 04:42:46 +00003026 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003027 return rflow;
3028}
3029
Tom Herbert0a9627f2010-03-16 08:03:29 +00003030/*
3031 * get_rps_cpu is called from netif_receive_skb and returns the target
3032 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003033 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003034 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003035static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3036 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003037{
Tom Herbert0a9627f2010-03-16 08:03:29 +00003038 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003039 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07003040 struct rps_dev_flow_table *flow_table;
3041 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003042 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07003043 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003044
Tom Herbert0a9627f2010-03-16 08:03:29 +00003045 if (skb_rx_queue_recorded(skb)) {
3046 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003047 if (unlikely(index >= dev->real_num_rx_queues)) {
3048 WARN_ONCE(dev->real_num_rx_queues > 1,
3049 "%s received packet on queue %u, but number "
3050 "of RX queues is %u\n",
3051 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003052 goto done;
3053 }
3054 rxqueue = dev->_rx + index;
3055 } else
3056 rxqueue = dev->_rx;
3057
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003058 map = rcu_dereference(rxqueue->rps_map);
3059 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08003060 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003061 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003062 tcpu = map->cpus[0];
3063 if (cpu_online(tcpu))
3064 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003065 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003066 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003067 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003068 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003069 }
3070
Changli Gao2d47b452010-08-17 19:00:56 +00003071 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003072 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003073 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003074
Tom Herbertfec5e652010-04-16 16:01:27 -07003075 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3076 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3077 if (flow_table && sock_flow_table) {
3078 u16 next_cpu;
3079 struct rps_dev_flow *rflow;
3080
3081 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3082 tcpu = rflow->cpu;
3083
3084 next_cpu = sock_flow_table->ents[skb->rxhash &
3085 sock_flow_table->mask];
3086
3087 /*
3088 * If the desired CPU (where last recvmsg was done) is
3089 * different from current CPU (one in the rx-queue flow
3090 * table entry), switch if one of the following holds:
3091 * - Current CPU is unset (equal to RPS_NO_CPU).
3092 * - Current CPU is offline.
3093 * - The current CPU's queue tail has advanced beyond the
3094 * last packet that was enqueued using this table entry.
3095 * This guarantees that all previous packets for the flow
3096 * have been dequeued, thus preserving in order delivery.
3097 */
3098 if (unlikely(tcpu != next_cpu) &&
3099 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3100 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003101 rflow->last_qtail)) >= 0)) {
3102 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003103 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003104 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003105
Tom Herbertfec5e652010-04-16 16:01:27 -07003106 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3107 *rflowp = rflow;
3108 cpu = tcpu;
3109 goto done;
3110 }
3111 }
3112
Tom Herbert0a9627f2010-03-16 08:03:29 +00003113 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003114 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003115
3116 if (cpu_online(tcpu)) {
3117 cpu = tcpu;
3118 goto done;
3119 }
3120 }
3121
3122done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003123 return cpu;
3124}
3125
Ben Hutchingsc4454772011-01-19 11:03:53 +00003126#ifdef CONFIG_RFS_ACCEL
3127
3128/**
3129 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3130 * @dev: Device on which the filter was set
3131 * @rxq_index: RX queue index
3132 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3133 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3134 *
3135 * Drivers that implement ndo_rx_flow_steer() should periodically call
3136 * this function for each installed filter and remove the filters for
3137 * which it returns %true.
3138 */
3139bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3140 u32 flow_id, u16 filter_id)
3141{
3142 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3143 struct rps_dev_flow_table *flow_table;
3144 struct rps_dev_flow *rflow;
3145 bool expire = true;
3146 int cpu;
3147
3148 rcu_read_lock();
3149 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3150 if (flow_table && flow_id <= flow_table->mask) {
3151 rflow = &flow_table->flows[flow_id];
3152 cpu = ACCESS_ONCE(rflow->cpu);
3153 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3154 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3155 rflow->last_qtail) <
3156 (int)(10 * flow_table->mask)))
3157 expire = false;
3158 }
3159 rcu_read_unlock();
3160 return expire;
3161}
3162EXPORT_SYMBOL(rps_may_expire_flow);
3163
3164#endif /* CONFIG_RFS_ACCEL */
3165
Tom Herbert0a9627f2010-03-16 08:03:29 +00003166/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003167static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003168{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003169 struct softnet_data *sd = data;
3170
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003171 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003172 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003173}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003174
Tom Herbertfec5e652010-04-16 16:01:27 -07003175#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003176
3177/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003178 * Check if this softnet_data structure is another cpu one
3179 * If yes, queue it to our IPI list and return 1
3180 * If no, return 0
3181 */
3182static int rps_ipi_queued(struct softnet_data *sd)
3183{
3184#ifdef CONFIG_RPS
3185 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3186
3187 if (sd != mysd) {
3188 sd->rps_ipi_next = mysd->rps_ipi_list;
3189 mysd->rps_ipi_list = sd;
3190
3191 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3192 return 1;
3193 }
3194#endif /* CONFIG_RPS */
3195 return 0;
3196}
3197
3198/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003199 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3200 * queue (may be a remote CPU queue).
3201 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003202static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3203 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003204{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003205 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003206 unsigned long flags;
3207
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003208 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003209
3210 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003211
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003212 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003213 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3214 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003215enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003216 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003217 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003218 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003219 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003220 return NET_RX_SUCCESS;
3221 }
3222
Eric Dumazetebda37c22010-05-06 23:51:21 +00003223 /* Schedule NAPI for backlog device
3224 * We can use non atomic operation since we own the queue lock
3225 */
3226 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003227 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003228 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003229 }
3230 goto enqueue;
3231 }
3232
Changli Gaodee42872010-05-02 05:42:16 +00003233 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003234 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003235
Tom Herbert0a9627f2010-03-16 08:03:29 +00003236 local_irq_restore(flags);
3237
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003238 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003239 kfree_skb(skb);
3240 return NET_RX_DROP;
3241}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243/**
3244 * netif_rx - post buffer to the network code
3245 * @skb: buffer to post
3246 *
3247 * This function receives a packet from a device driver and queues it for
3248 * the upper (protocol) levels to process. It always succeeds. The buffer
3249 * may be dropped during processing for congestion control or by the
3250 * protocol layers.
3251 *
3252 * return values:
3253 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 * NET_RX_DROP (packet was dropped)
3255 *
3256 */
3257
3258int netif_rx(struct sk_buff *skb)
3259{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003260 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261
3262 /* if netpoll wants it, pretend we never saw it */
3263 if (netpoll_rx(skb))
3264 return NET_RX_DROP;
3265
Eric Dumazet588f0332011-11-15 04:12:55 +00003266 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267
Koki Sanagicf66ba52010-08-23 18:45:02 +09003268 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003269#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003270 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003271 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003272 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273
Changli Gaocece1942010-08-07 20:35:43 -07003274 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003275 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003276
3277 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003278 if (cpu < 0)
3279 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003280
3281 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3282
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003283 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003284 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003285 } else
3286#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003287 {
3288 unsigned int qtail;
3289 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3290 put_cpu();
3291 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003292 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003294EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295
3296int netif_rx_ni(struct sk_buff *skb)
3297{
3298 int err;
3299
3300 preempt_disable();
3301 err = netif_rx(skb);
3302 if (local_softirq_pending())
3303 do_softirq();
3304 preempt_enable();
3305
3306 return err;
3307}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308EXPORT_SYMBOL(netif_rx_ni);
3309
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310static void net_tx_action(struct softirq_action *h)
3311{
3312 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3313
3314 if (sd->completion_queue) {
3315 struct sk_buff *clist;
3316
3317 local_irq_disable();
3318 clist = sd->completion_queue;
3319 sd->completion_queue = NULL;
3320 local_irq_enable();
3321
3322 while (clist) {
3323 struct sk_buff *skb = clist;
3324 clist = clist->next;
3325
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003326 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003327 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 __kfree_skb(skb);
3329 }
3330 }
3331
3332 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003333 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
3335 local_irq_disable();
3336 head = sd->output_queue;
3337 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003338 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 local_irq_enable();
3340
3341 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003342 struct Qdisc *q = head;
3343 spinlock_t *root_lock;
3344
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 head = head->next_sched;
3346
David S. Miller5fb66222008-08-02 20:02:43 -07003347 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003348 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003349 smp_mb__before_clear_bit();
3350 clear_bit(__QDISC_STATE_SCHED,
3351 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003352 qdisc_run(q);
3353 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003355 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003356 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003357 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003358 } else {
3359 smp_mb__before_clear_bit();
3360 clear_bit(__QDISC_STATE_SCHED,
3361 &q->state);
3362 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 }
3364 }
3365 }
3366}
3367
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003368#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3369 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003370/* This hook is defined here for ATM LANE */
3371int (*br_fdb_test_addr_hook)(struct net_device *dev,
3372 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003373EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003374#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376#ifdef CONFIG_NET_CLS_ACT
3377/* TODO: Maybe we should just force sch_ingress to be compiled in
3378 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
3379 * a compare and 2 stores extra right now if we dont have it on
3380 * but have CONFIG_NET_CLS_ACT
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003381 * NOTE: This doesn't stop any functionality; if you dont have
3382 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383 *
3384 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003385static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003388 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003389 int result = TC_ACT_OK;
3390 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003391
Stephen Hemmingerde384832010-08-01 00:33:23 -07003392 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003393 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3394 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003395 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 }
3397
Herbert Xuf697c3e2007-10-14 00:38:47 -07003398 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3399 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3400
David S. Miller83874002008-07-17 00:53:03 -07003401 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003402 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003403 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003404 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3405 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003406 spin_unlock(qdisc_lock(q));
3407 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003408
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 return result;
3410}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003411
3412static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3413 struct packet_type **pt_prev,
3414 int *ret, struct net_device *orig_dev)
3415{
Eric Dumazet24824a02010-10-02 06:11:55 +00003416 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3417
3418 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003419 goto out;
3420
3421 if (*pt_prev) {
3422 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3423 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003424 }
3425
Eric Dumazet24824a02010-10-02 06:11:55 +00003426 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003427 case TC_ACT_SHOT:
3428 case TC_ACT_STOLEN:
3429 kfree_skb(skb);
3430 return NULL;
3431 }
3432
3433out:
3434 skb->tc_verd = 0;
3435 return skb;
3436}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437#endif
3438
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003439/**
3440 * netdev_rx_handler_register - register receive handler
3441 * @dev: device to register a handler for
3442 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003443 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003444 *
3445 * Register a receive hander for a device. This handler will then be
3446 * called from __netif_receive_skb. A negative errno code is returned
3447 * on a failure.
3448 *
3449 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003450 *
3451 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003452 */
3453int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003454 rx_handler_func_t *rx_handler,
3455 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003456{
3457 ASSERT_RTNL();
3458
3459 if (dev->rx_handler)
3460 return -EBUSY;
3461
Jiri Pirko93e2c322010-06-10 03:34:59 +00003462 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003463 rcu_assign_pointer(dev->rx_handler, rx_handler);
3464
3465 return 0;
3466}
3467EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3468
3469/**
3470 * netdev_rx_handler_unregister - unregister receive handler
3471 * @dev: device to unregister a handler from
3472 *
3473 * Unregister a receive hander from a device.
3474 *
3475 * The caller must hold the rtnl_mutex.
3476 */
3477void netdev_rx_handler_unregister(struct net_device *dev)
3478{
3479
3480 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003481 RCU_INIT_POINTER(dev->rx_handler, NULL);
3482 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003483}
3484EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3485
Mel Gormanb4b9e352012-07-31 16:44:26 -07003486/*
3487 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3488 * the special handling of PFMEMALLOC skbs.
3489 */
3490static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3491{
3492 switch (skb->protocol) {
3493 case __constant_htons(ETH_P_ARP):
3494 case __constant_htons(ETH_P_IP):
3495 case __constant_htons(ETH_P_IPV6):
3496 case __constant_htons(ETH_P_8021Q):
3497 return true;
3498 default:
3499 return false;
3500 }
3501}
3502
Eric Dumazet10f744d2010-03-28 23:07:20 -07003503static int __netif_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504{
3505 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003506 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003507 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003508 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003509 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003511 __be16 type;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003512 unsigned long pflags = current->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513
Eric Dumazet588f0332011-11-15 04:12:55 +00003514 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003515
Koki Sanagicf66ba52010-08-23 18:45:02 +09003516 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003517
Mel Gormanb4b9e352012-07-31 16:44:26 -07003518 /*
3519 * PFMEMALLOC skbs are special, they should
3520 * - be delivered to SOCK_MEMALLOC sockets only
3521 * - stay away from userspace
3522 * - have bounded memory usage
3523 *
3524 * Use PF_MEMALLOC as this saves us from propagating the allocation
3525 * context down to all allocation sites.
3526 */
3527 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3528 current->flags |= PF_MEMALLOC;
3529
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003531 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003532 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003534 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003535
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003536 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003537 if (!skb_transport_header_was_set(skb))
3538 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003539 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
3541 pt_prev = NULL;
3542
3543 rcu_read_lock();
3544
David S. Miller63d8ea72011-02-28 10:48:59 -08003545another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003546 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003547
3548 __this_cpu_inc(softnet_data.processed);
3549
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003550 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3551 skb = vlan_untag(skb);
3552 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003553 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003554 }
3555
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556#ifdef CONFIG_NET_CLS_ACT
3557 if (skb->tc_verd & TC_NCLS) {
3558 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3559 goto ncls;
3560 }
3561#endif
3562
Mel Gormanb4b9e352012-07-31 16:44:26 -07003563 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3564 goto skip_taps;
3565
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003567 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003568 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003569 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570 pt_prev = ptype;
3571 }
3572 }
3573
Mel Gormanb4b9e352012-07-31 16:44:26 -07003574skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003576 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3577 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003578 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579ncls:
3580#endif
3581
Mel Gormanb4b9e352012-07-31 16:44:26 -07003582 if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3583 && !skb_pfmemalloc_protocol(skb))
3584 goto drop;
3585
John Fastabend24257172011-10-10 09:16:41 +00003586 if (vlan_tx_tag_present(skb)) {
3587 if (pt_prev) {
3588 ret = deliver_skb(skb, pt_prev, orig_dev);
3589 pt_prev = NULL;
3590 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003591 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003592 goto another_round;
3593 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003594 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003595 }
3596
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003597 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003598 if (rx_handler) {
3599 if (pt_prev) {
3600 ret = deliver_skb(skb, pt_prev, orig_dev);
3601 pt_prev = NULL;
3602 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003603 switch (rx_handler(&skb)) {
3604 case RX_HANDLER_CONSUMED:
Mel Gormanb4b9e352012-07-31 16:44:26 -07003605 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003606 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003607 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003608 case RX_HANDLER_EXACT:
3609 deliver_exact = true;
3610 case RX_HANDLER_PASS:
3611 break;
3612 default:
3613 BUG();
3614 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003615 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003617 if (vlan_tx_nonzero_tag_present(skb))
3618 skb->pkt_type = PACKET_OTHERHOST;
3619
David S. Miller63d8ea72011-02-28 10:48:59 -08003620 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003621 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003622
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003624 list_for_each_entry_rcu(ptype,
3625 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003626 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003627 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3628 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003629 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003630 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 pt_prev = ptype;
3632 }
3633 }
3634
3635 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003636 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003637 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003638 else
3639 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003641drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003642 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 kfree_skb(skb);
3644 /* Jamal, now you will not able to escape explaining
3645 * me how you were going to use this. :-)
3646 */
3647 ret = NET_RX_DROP;
3648 }
3649
Mel Gormanb4b9e352012-07-31 16:44:26 -07003650unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003652out:
3653 tsk_restore_flags(current, pflags, PF_MEMALLOC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654 return ret;
3655}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003656
3657/**
3658 * netif_receive_skb - process receive buffer from network
3659 * @skb: buffer to process
3660 *
3661 * netif_receive_skb() is the main receive data processing function.
3662 * It always succeeds. The buffer may be dropped during processing
3663 * for congestion control or by the protocol layers.
3664 *
3665 * This function may only be called from softirq context and interrupts
3666 * should be enabled.
3667 *
3668 * Return values (usually ignored):
3669 * NET_RX_SUCCESS: no congestion
3670 * NET_RX_DROP: packet was dropped
3671 */
3672int netif_receive_skb(struct sk_buff *skb)
3673{
Eric Dumazet588f0332011-11-15 04:12:55 +00003674 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003675
Richard Cochranc1f19b52010-07-17 08:49:36 +00003676 if (skb_defer_rx_timestamp(skb))
3677 return NET_RX_SUCCESS;
3678
Eric Dumazetdf334542010-03-24 19:13:54 +00003679#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003680 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003681 struct rps_dev_flow voidflow, *rflow = &voidflow;
3682 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003683
Eric Dumazet3b098e22010-05-15 23:57:10 -07003684 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003685
Eric Dumazet3b098e22010-05-15 23:57:10 -07003686 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003687
Eric Dumazet3b098e22010-05-15 23:57:10 -07003688 if (cpu >= 0) {
3689 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3690 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003691 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003692 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003693 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003694 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003695#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003696 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003697}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003698EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699
Eric Dumazet88751272010-04-19 05:07:33 +00003700/* Network device is going away, flush any packets still pending
3701 * Called with irqs disabled.
3702 */
Changli Gao152102c2010-03-30 20:16:22 +00003703static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003704{
Changli Gao152102c2010-03-30 20:16:22 +00003705 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003706 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003707 struct sk_buff *skb, *tmp;
3708
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003709 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003710 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003711 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003712 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003713 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003714 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003715 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003716 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003717 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003718
3719 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3720 if (skb->dev == dev) {
3721 __skb_unlink(skb, &sd->process_queue);
3722 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003723 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003724 }
3725 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003726}
3727
Herbert Xud565b0a2008-12-15 23:38:52 -08003728static int napi_gro_complete(struct sk_buff *skb)
3729{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003730 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003731 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003732 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003733 int err = -ENOENT;
3734
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003735 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3736
Herbert Xufc59f9a2009-04-14 15:11:06 -07003737 if (NAPI_GRO_CB(skb)->count == 1) {
3738 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003739 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003740 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003741
3742 rcu_read_lock();
3743 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003744 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003745 continue;
3746
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003747 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003748 break;
3749 }
3750 rcu_read_unlock();
3751
3752 if (err) {
3753 WARN_ON(&ptype->list == head);
3754 kfree_skb(skb);
3755 return NET_RX_SUCCESS;
3756 }
3757
3758out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003759 return netif_receive_skb(skb);
3760}
3761
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003762/* napi->gro_list contains packets ordered by age.
3763 * youngest packets at the head of it.
3764 * Complete skbs in reverse order to reduce latencies.
3765 */
3766void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003767{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003768 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003769
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003770 /* scan list and build reverse chain */
3771 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3772 skb->prev = prev;
3773 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003774 }
3775
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003776 for (skb = prev; skb; skb = prev) {
3777 skb->next = NULL;
3778
3779 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3780 return;
3781
3782 prev = skb->prev;
3783 napi_gro_complete(skb);
3784 napi->gro_count--;
3785 }
3786
Herbert Xud565b0a2008-12-15 23:38:52 -08003787 napi->gro_list = NULL;
3788}
Eric Dumazet86cac582010-08-31 18:25:32 +00003789EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003790
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003791static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3792{
3793 struct sk_buff *p;
3794 unsigned int maclen = skb->dev->hard_header_len;
3795
3796 for (p = napi->gro_list; p; p = p->next) {
3797 unsigned long diffs;
3798
3799 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3800 diffs |= p->vlan_tci ^ skb->vlan_tci;
3801 if (maclen == ETH_HLEN)
3802 diffs |= compare_ether_header(skb_mac_header(p),
3803 skb_gro_mac_header(skb));
3804 else if (!diffs)
3805 diffs = memcmp(skb_mac_header(p),
3806 skb_gro_mac_header(skb),
3807 maclen);
3808 NAPI_GRO_CB(p)->same_flow = !diffs;
3809 NAPI_GRO_CB(p)->flush = 0;
3810 }
3811}
3812
Rami Rosenbb728822012-11-28 21:55:25 +00003813static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003814{
3815 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003816 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003817 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003818 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003819 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08003820 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003821 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003822
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003823 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003824 goto normal;
3825
David S. Miller21dc3302010-08-23 00:13:46 -07003826 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003827 goto normal;
3828
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003829 gro_list_prepare(napi, skb);
3830
Herbert Xud565b0a2008-12-15 23:38:52 -08003831 rcu_read_lock();
3832 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003833 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003834 continue;
3835
Herbert Xu86911732009-01-29 14:19:50 +00003836 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08003837 mac_len = skb->network_header - skb->mac_header;
3838 skb->mac_len = mac_len;
3839 NAPI_GRO_CB(skb)->same_flow = 0;
3840 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003841 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003842
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003843 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003844 break;
3845 }
3846 rcu_read_unlock();
3847
3848 if (&ptype->list == head)
3849 goto normal;
3850
Herbert Xu0da2afd52008-12-26 14:57:42 -08003851 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003852 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003853
Herbert Xud565b0a2008-12-15 23:38:52 -08003854 if (pp) {
3855 struct sk_buff *nskb = *pp;
3856
3857 *pp = nskb->next;
3858 nskb->next = NULL;
3859 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003860 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003861 }
3862
Herbert Xu0da2afd52008-12-26 14:57:42 -08003863 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003864 goto ok;
3865
Herbert Xu4ae55442009-02-08 18:00:36 +00003866 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003867 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003868
Herbert Xu4ae55442009-02-08 18:00:36 +00003869 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003870 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003871 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003872 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003873 skb->next = napi->gro_list;
3874 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003875 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003876
Herbert Xuad0f9902009-02-01 01:24:55 -08003877pull:
Herbert Xucb189782009-05-26 18:50:31 +00003878 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3879 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3880
3881 BUG_ON(skb->end - skb->tail < grow);
3882
3883 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3884
3885 skb->tail += grow;
3886 skb->data_len -= grow;
3887
3888 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003889 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003890
Eric Dumazet9e903e02011-10-18 21:00:24 +00003891 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003892 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003893 memmove(skb_shinfo(skb)->frags,
3894 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003895 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003896 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003897 }
3898
Herbert Xud565b0a2008-12-15 23:38:52 -08003899ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003900 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003901
3902normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003903 ret = GRO_NORMAL;
3904 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003905}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003906
Herbert Xu96e93ea2009-01-06 10:49:34 -08003907
Rami Rosenbb728822012-11-28 21:55:25 +00003908static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003909{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003910 switch (ret) {
3911 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003912 if (netif_receive_skb(skb))
3913 ret = GRO_DROP;
3914 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003915
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003916 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003917 kfree_skb(skb);
3918 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003919
Eric Dumazetdaa86542012-04-19 07:07:40 +00003920 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003921 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3922 kmem_cache_free(skbuff_head_cache, skb);
3923 else
3924 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003925 break;
3926
Ben Hutchings5b252f02009-10-29 07:17:09 +00003927 case GRO_HELD:
3928 case GRO_MERGED:
3929 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003930 }
3931
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003932 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003933}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003934
Eric Dumazetca07e432012-10-06 22:28:06 +00003935static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003936{
Eric Dumazetca07e432012-10-06 22:28:06 +00003937 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3938 const skb_frag_t *frag0 = &pinfo->frags[0];
3939
Herbert Xu78a478d2009-05-26 18:50:21 +00003940 NAPI_GRO_CB(skb)->data_offset = 0;
3941 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003942 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003943
Herbert Xu78d3fd02009-05-26 18:50:23 +00003944 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003945 pinfo->nr_frags &&
3946 !PageHighMem(skb_frag_page(frag0))) {
3947 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3948 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003949 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003950}
Herbert Xu78a478d2009-05-26 18:50:21 +00003951
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003952gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003953{
Herbert Xu86911732009-01-29 14:19:50 +00003954 skb_gro_reset_offset(skb);
3955
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003956 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003957}
3958EXPORT_SYMBOL(napi_gro_receive);
3959
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003960static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003961{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003962 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003963 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3964 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003965 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003966 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003967 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003968
3969 napi->skb = skb;
3970}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003971
Herbert Xu76620aa2009-04-16 02:02:07 -07003972struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003973{
Herbert Xu5d38a072009-01-04 16:13:40 -08003974 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003975
3976 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003977 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3978 if (skb)
3979 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003980 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003981 return skb;
3982}
Herbert Xu76620aa2009-04-16 02:02:07 -07003983EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003984
Rami Rosenbb728822012-11-28 21:55:25 +00003985static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003986 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003987{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003988 switch (ret) {
3989 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003990 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003991 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003992
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003993 if (ret == GRO_HELD)
3994 skb_gro_pull(skb, -ETH_HLEN);
3995 else if (netif_receive_skb(skb))
3996 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003997 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003998
3999 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004000 case GRO_MERGED_FREE:
4001 napi_reuse_skb(napi, skb);
4002 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004003
4004 case GRO_MERGED:
4005 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004006 }
4007
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004008 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004009}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004010
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004011static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004012{
Herbert Xu76620aa2009-04-16 02:02:07 -07004013 struct sk_buff *skb = napi->skb;
4014 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00004015 unsigned int hlen;
4016 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07004017
4018 napi->skb = NULL;
4019
4020 skb_reset_mac_header(skb);
4021 skb_gro_reset_offset(skb);
4022
Herbert Xua5b1cf22009-05-26 18:50:28 +00004023 off = skb_gro_offset(skb);
4024 hlen = off + sizeof(*eth);
4025 eth = skb_gro_header_fast(skb, off);
4026 if (skb_gro_header_hard(skb, hlen)) {
4027 eth = skb_gro_header_slow(skb, hlen, off);
4028 if (unlikely(!eth)) {
4029 napi_reuse_skb(napi, skb);
4030 skb = NULL;
4031 goto out;
4032 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004033 }
4034
4035 skb_gro_pull(skb, sizeof(*eth));
4036
4037 /*
4038 * This works because the only protocols we care about don't require
4039 * special handling. We'll fix it up properly at the end.
4040 */
4041 skb->protocol = eth->h_proto;
4042
4043out:
4044 return skb;
4045}
Herbert Xu76620aa2009-04-16 02:02:07 -07004046
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004047gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004048{
4049 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004050
4051 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004052 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004053
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004054 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004055}
4056EXPORT_SYMBOL(napi_gro_frags);
4057
Eric Dumazete326bed2010-04-22 00:22:45 -07004058/*
4059 * net_rps_action sends any pending IPI's for rps.
4060 * Note: called with local irq disabled, but exits with local irq enabled.
4061 */
4062static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4063{
4064#ifdef CONFIG_RPS
4065 struct softnet_data *remsd = sd->rps_ipi_list;
4066
4067 if (remsd) {
4068 sd->rps_ipi_list = NULL;
4069
4070 local_irq_enable();
4071
4072 /* Send pending IPI's to kick RPS processing on remote cpus. */
4073 while (remsd) {
4074 struct softnet_data *next = remsd->rps_ipi_next;
4075
4076 if (cpu_online(remsd->cpu))
4077 __smp_call_function_single(remsd->cpu,
4078 &remsd->csd, 0);
4079 remsd = next;
4080 }
4081 } else
4082#endif
4083 local_irq_enable();
4084}
4085
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004086static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087{
4088 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004089 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
Eric Dumazete326bed2010-04-22 00:22:45 -07004091#ifdef CONFIG_RPS
4092 /* Check if we have pending ipi, its better to send them now,
4093 * not waiting net_rx_action() end.
4094 */
4095 if (sd->rps_ipi_list) {
4096 local_irq_disable();
4097 net_rps_action_and_irq_enable(sd);
4098 }
4099#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004100 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004101 local_irq_disable();
4102 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004104 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105
Changli Gao6e7676c2010-04-27 15:07:33 -07004106 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004107 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004108 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004109 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004110 input_queue_head_incr(sd);
4111 if (++work >= quota) {
4112 local_irq_enable();
4113 return work;
4114 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
Changli Gao6e7676c2010-04-27 15:07:33 -07004117 rps_lock(sd);
4118 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004119 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004120 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4121 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004122
Changli Gao6e7676c2010-04-27 15:07:33 -07004123 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004124 /*
4125 * Inline a custom version of __napi_complete().
4126 * only current cpu owns and manipulates this napi,
4127 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4128 * we can use a plain write instead of clear_bit(),
4129 * and we dont need an smp_mb() memory barrier.
4130 */
4131 list_del(&napi->poll_list);
4132 napi->state = 0;
4133
Changli Gao6e7676c2010-04-27 15:07:33 -07004134 quota = work + qlen;
4135 }
4136 rps_unlock(sd);
4137 }
4138 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004140 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141}
4142
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004143/**
4144 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004145 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004146 *
4147 * The entry's receive function will be scheduled to run
4148 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004149void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004150{
4151 unsigned long flags;
4152
4153 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004154 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004155 local_irq_restore(flags);
4156}
4157EXPORT_SYMBOL(__napi_schedule);
4158
Herbert Xud565b0a2008-12-15 23:38:52 -08004159void __napi_complete(struct napi_struct *n)
4160{
4161 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4162 BUG_ON(n->gro_list);
4163
4164 list_del(&n->poll_list);
4165 smp_mb__before_clear_bit();
4166 clear_bit(NAPI_STATE_SCHED, &n->state);
4167}
4168EXPORT_SYMBOL(__napi_complete);
4169
4170void napi_complete(struct napi_struct *n)
4171{
4172 unsigned long flags;
4173
4174 /*
4175 * don't let napi dequeue from the cpu poll list
4176 * just in case its running on a different cpu
4177 */
4178 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4179 return;
4180
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004181 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004182 local_irq_save(flags);
4183 __napi_complete(n);
4184 local_irq_restore(flags);
4185}
4186EXPORT_SYMBOL(napi_complete);
4187
4188void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4189 int (*poll)(struct napi_struct *, int), int weight)
4190{
4191 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004192 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004193 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004194 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004195 napi->poll = poll;
4196 napi->weight = weight;
4197 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004198 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004199#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004200 spin_lock_init(&napi->poll_lock);
4201 napi->poll_owner = -1;
4202#endif
4203 set_bit(NAPI_STATE_SCHED, &napi->state);
4204}
4205EXPORT_SYMBOL(netif_napi_add);
4206
4207void netif_napi_del(struct napi_struct *napi)
4208{
4209 struct sk_buff *skb, *next;
4210
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004211 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004212 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004213
4214 for (skb = napi->gro_list; skb; skb = next) {
4215 next = skb->next;
4216 skb->next = NULL;
4217 kfree_skb(skb);
4218 }
4219
4220 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004221 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004222}
4223EXPORT_SYMBOL(netif_napi_del);
4224
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225static void net_rx_action(struct softirq_action *h)
4226{
Eric Dumazete326bed2010-04-22 00:22:45 -07004227 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004228 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004229 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004230 void *have;
4231
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 local_irq_disable();
4233
Eric Dumazete326bed2010-04-22 00:22:45 -07004234 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004235 struct napi_struct *n;
4236 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004238 /* If softirq window is exhuasted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004239 * Allow this to run for 2 jiffies since which will allow
4240 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004241 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004242 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 goto softnet_break;
4244
4245 local_irq_enable();
4246
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004247 /* Even though interrupts have been re-enabled, this
4248 * access is safe because interrupts can only add new
4249 * entries to the tail of this list, and only ->poll()
4250 * calls can remove this head entry from the list.
4251 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004252 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004254 have = netpoll_poll_lock(n);
4255
4256 weight = n->weight;
4257
David S. Miller0a7606c2007-10-29 21:28:47 -07004258 /* This NAPI_STATE_SCHED test is for avoiding a race
4259 * with netpoll's poll_napi(). Only the entity which
4260 * obtains the lock and sees NAPI_STATE_SCHED set will
4261 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004262 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004263 */
4264 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004265 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004266 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004267 trace_napi_poll(n);
4268 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004269
4270 WARN_ON_ONCE(work > weight);
4271
4272 budget -= work;
4273
4274 local_irq_disable();
4275
4276 /* Drivers must not modify the NAPI state if they
4277 * consume the entire weight. In such cases this code
4278 * still "owns" the NAPI instance and therefore can
4279 * move the instance around on the list at-will.
4280 */
David S. Millerfed17f32008-01-07 21:00:40 -08004281 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004282 if (unlikely(napi_disable_pending(n))) {
4283 local_irq_enable();
4284 napi_complete(n);
4285 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004286 } else {
4287 if (n->gro_list) {
4288 /* flush too old packets
4289 * If HZ < 1000, flush all packets.
4290 */
4291 local_irq_enable();
4292 napi_gro_flush(n, HZ >= 1000);
4293 local_irq_disable();
4294 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004295 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004296 }
David S. Millerfed17f32008-01-07 21:00:40 -08004297 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004298
4299 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 }
4301out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004302 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004303
Chris Leechdb217332006-06-17 21:24:58 -07004304#ifdef CONFIG_NET_DMA
4305 /*
4306 * There may not be any more sk_buffs coming right now, so push
4307 * any pending DMA copies to hardware
4308 */
Dan Williams2ba05622009-01-06 11:38:14 -07004309 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004310#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004311
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312 return;
4313
4314softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004315 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4317 goto out;
4318}
4319
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004320static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321
4322/**
4323 * register_gifconf - register a SIOCGIF handler
4324 * @family: Address family
4325 * @gifconf: Function handler
4326 *
4327 * Register protocol dependent address dumping routines. The handler
4328 * that is passed must not be freed or reused until it has been replaced
4329 * by another handler.
4330 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004331int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332{
4333 if (family >= NPROTO)
4334 return -EINVAL;
4335 gifconf_list[family] = gifconf;
4336 return 0;
4337}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004338EXPORT_SYMBOL(register_gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339
4340
4341/*
4342 * Map an interface index to its name (SIOCGIFNAME)
4343 */
4344
4345/*
4346 * We need this ioctl for efficient implementation of the
4347 * if_indextoname() function required by the IPv6 API. Without
4348 * it, we would have to search all the interfaces to find a
4349 * match. --pb
4350 */
4351
Eric W. Biederman881d9662007-09-17 11:56:21 -07004352static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353{
4354 struct net_device *dev;
4355 struct ifreq ifr;
Brian Haleyc91f6df2012-11-26 05:21:08 +00004356 unsigned seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357
4358 /*
4359 * Fetch the caller's info block.
4360 */
4361
4362 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4363 return -EFAULT;
4364
Brian Haleyc91f6df2012-11-26 05:21:08 +00004365retry:
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004366 seq = read_seqcount_begin(&devnet_rename_seq);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004367 rcu_read_lock();
4368 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004370 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 return -ENODEV;
4372 }
4373
4374 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004375 rcu_read_unlock();
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004376 if (read_seqcount_retry(&devnet_rename_seq, seq))
Brian Haleyc91f6df2012-11-26 05:21:08 +00004377 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
4379 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4380 return -EFAULT;
4381 return 0;
4382}
4383
4384/*
4385 * Perform a SIOCGIFCONF call. This structure will change
4386 * size eventually, and there is nothing I can do about it.
4387 * Thus we will need a 'compatibility mode'.
4388 */
4389
Eric W. Biederman881d9662007-09-17 11:56:21 -07004390static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391{
4392 struct ifconf ifc;
4393 struct net_device *dev;
4394 char __user *pos;
4395 int len;
4396 int total;
4397 int i;
4398
4399 /*
4400 * Fetch the caller's info block.
4401 */
4402
4403 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4404 return -EFAULT;
4405
4406 pos = ifc.ifc_buf;
4407 len = ifc.ifc_len;
4408
4409 /*
4410 * Loop over the interfaces, and write an info block for each.
4411 */
4412
4413 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004414 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415 for (i = 0; i < NPROTO; i++) {
4416 if (gifconf_list[i]) {
4417 int done;
4418 if (!pos)
4419 done = gifconf_list[i](dev, NULL, 0);
4420 else
4421 done = gifconf_list[i](dev, pos + total,
4422 len - total);
4423 if (done < 0)
4424 return -EFAULT;
4425 total += done;
4426 }
4427 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004429
4430 /*
4431 * All done. Write the updated control block back to the caller.
4432 */
4433 ifc.ifc_len = total;
4434
4435 /*
4436 * Both BSD and Solaris return 0 here, so we do too.
4437 */
4438 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4439}
4440
4441#ifdef CONFIG_PROC_FS
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004442
Eric Dumazet2def16a2012-04-02 22:33:02 +00004443#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004444
4445#define get_bucket(x) ((x) >> BUCKET_SPACE)
4446#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4447#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4448
Eric Dumazet2def16a2012-04-02 22:33:02 +00004449static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004450{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004451 struct net *net = seq_file_net(seq);
4452 struct net_device *dev;
4453 struct hlist_node *p;
4454 struct hlist_head *h;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004455 unsigned int count = 0, offset = get_offset(*pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004456
Eric Dumazet2def16a2012-04-02 22:33:02 +00004457 h = &net->dev_name_head[get_bucket(*pos)];
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004458 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004459 if (++count == offset)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004460 return dev;
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004461 }
4462
4463 return NULL;
4464}
4465
Eric Dumazet2def16a2012-04-02 22:33:02 +00004466static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004467{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004468 struct net_device *dev;
4469 unsigned int bucket;
4470
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004471 do {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004472 dev = dev_from_same_bucket(seq, pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004473 if (dev)
4474 return dev;
4475
Eric Dumazet2def16a2012-04-02 22:33:02 +00004476 bucket = get_bucket(*pos) + 1;
4477 *pos = set_bucket_offset(bucket, 1);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004478 } while (bucket < NETDEV_HASHENTRIES);
4479
4480 return NULL;
4481}
4482
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483/*
4484 * This is invoked by the /proc filesystem handler to display a device
4485 * in detail.
4486 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004488 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004490 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07004491 if (!*pos)
4492 return SEQ_START_TOKEN;
4493
Eric Dumazet2def16a2012-04-02 22:33:02 +00004494 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004495 return NULL;
Pavel Emelianov7562f872007-05-03 15:13:45 -07004496
Eric Dumazet2def16a2012-04-02 22:33:02 +00004497 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498}
4499
4500void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4501{
4502 ++*pos;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004503 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504}
4505
4506void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004507 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004509 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510}
4511
4512static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4513{
Eric Dumazet28172732010-07-07 14:58:56 -07004514 struct rtnl_link_stats64 temp;
4515 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00004517 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4518 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
Rusty Russell5a1b5892007-04-28 21:04:03 -07004519 dev->name, stats->rx_bytes, stats->rx_packets,
4520 stats->rx_errors,
4521 stats->rx_dropped + stats->rx_missed_errors,
4522 stats->rx_fifo_errors,
4523 stats->rx_length_errors + stats->rx_over_errors +
4524 stats->rx_crc_errors + stats->rx_frame_errors,
4525 stats->rx_compressed, stats->multicast,
4526 stats->tx_bytes, stats->tx_packets,
4527 stats->tx_errors, stats->tx_dropped,
4528 stats->tx_fifo_errors, stats->collisions,
4529 stats->tx_carrier_errors +
4530 stats->tx_aborted_errors +
4531 stats->tx_window_errors +
4532 stats->tx_heartbeat_errors,
4533 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534}
4535
4536/*
4537 * Called from the PROCfs module. This now uses the new arbitrary sized
4538 * /proc/net interface to create /proc/net/dev
4539 */
4540static int dev_seq_show(struct seq_file *seq, void *v)
4541{
4542 if (v == SEQ_START_TOKEN)
4543 seq_puts(seq, "Inter-| Receive "
4544 " | Transmit\n"
4545 " face |bytes packets errs drop fifo frame "
4546 "compressed multicast|bytes packets errs "
4547 "drop fifo colls carrier compressed\n");
4548 else
4549 dev_seq_printf_stats(seq, v);
4550 return 0;
4551}
4552
Changli Gaodee42872010-05-02 05:42:16 +00004553static struct softnet_data *softnet_get_online(loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554{
Changli Gaodee42872010-05-02 05:42:16 +00004555 struct softnet_data *sd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004557 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004558 if (cpu_online(*pos)) {
Changli Gaodee42872010-05-02 05:42:16 +00004559 sd = &per_cpu(softnet_data, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 break;
4561 } else
4562 ++*pos;
Changli Gaodee42872010-05-02 05:42:16 +00004563 return sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564}
4565
4566static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4567{
4568 return softnet_get_online(pos);
4569}
4570
4571static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4572{
4573 ++*pos;
4574 return softnet_get_online(pos);
4575}
4576
4577static void softnet_seq_stop(struct seq_file *seq, void *v)
4578{
4579}
4580
4581static int softnet_seq_show(struct seq_file *seq, void *v)
4582{
Changli Gaodee42872010-05-02 05:42:16 +00004583 struct softnet_data *sd = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584
Tom Herbert0a9627f2010-03-16 08:03:29 +00004585 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Changli Gaodee42872010-05-02 05:42:16 +00004586 sd->processed, sd->dropped, sd->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07004587 0, 0, 0, 0, /* was fastroute */
Changli Gaodee42872010-05-02 05:42:16 +00004588 sd->cpu_collision, sd->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589 return 0;
4590}
4591
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004592static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 .start = dev_seq_start,
4594 .next = dev_seq_next,
4595 .stop = dev_seq_stop,
4596 .show = dev_seq_show,
4597};
4598
4599static int dev_seq_open(struct inode *inode, struct file *file)
4600{
Denis V. Luneve372c412007-11-19 22:31:54 -08004601 return seq_open_net(inode, file, &dev_seq_ops,
Eric Dumazet2def16a2012-04-02 22:33:02 +00004602 sizeof(struct seq_net_private));
Anton Blanchard5cac98d2011-11-27 21:14:46 +00004603}
4604
Arjan van de Ven9a321442007-02-12 00:55:35 -08004605static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 .owner = THIS_MODULE,
4607 .open = dev_seq_open,
4608 .read = seq_read,
4609 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08004610 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611};
4612
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004613static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614 .start = softnet_seq_start,
4615 .next = softnet_seq_next,
4616 .stop = softnet_seq_stop,
4617 .show = softnet_seq_show,
4618};
4619
4620static int softnet_seq_open(struct inode *inode, struct file *file)
4621{
4622 return seq_open(file, &softnet_seq_ops);
4623}
4624
Arjan van de Ven9a321442007-02-12 00:55:35 -08004625static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626 .owner = THIS_MODULE,
4627 .open = softnet_seq_open,
4628 .read = seq_read,
4629 .llseek = seq_lseek,
4630 .release = seq_release,
4631};
4632
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004633static void *ptype_get_idx(loff_t pos)
4634{
4635 struct packet_type *pt = NULL;
4636 loff_t i = 0;
4637 int t;
4638
4639 list_for_each_entry_rcu(pt, &ptype_all, list) {
4640 if (i == pos)
4641 return pt;
4642 ++i;
4643 }
4644
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004645 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004646 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4647 if (i == pos)
4648 return pt;
4649 ++i;
4650 }
4651 }
4652 return NULL;
4653}
4654
4655static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004656 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004657{
4658 rcu_read_lock();
4659 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4660}
4661
4662static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4663{
4664 struct packet_type *pt;
4665 struct list_head *nxt;
4666 int hash;
4667
4668 ++*pos;
4669 if (v == SEQ_START_TOKEN)
4670 return ptype_get_idx(0);
4671
4672 pt = v;
4673 nxt = pt->list.next;
4674 if (pt->type == htons(ETH_P_ALL)) {
4675 if (nxt != &ptype_all)
4676 goto found;
4677 hash = 0;
4678 nxt = ptype_base[0].next;
4679 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004680 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004681
4682 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004683 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004684 return NULL;
4685 nxt = ptype_base[hash].next;
4686 }
4687found:
4688 return list_entry(nxt, struct packet_type, list);
4689}
4690
4691static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004692 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004693{
4694 rcu_read_unlock();
4695}
4696
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004697static int ptype_seq_show(struct seq_file *seq, void *v)
4698{
4699 struct packet_type *pt = v;
4700
4701 if (v == SEQ_START_TOKEN)
4702 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004703 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004704 if (pt->type == htons(ETH_P_ALL))
4705 seq_puts(seq, "ALL ");
4706 else
4707 seq_printf(seq, "%04x", ntohs(pt->type));
4708
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08004709 seq_printf(seq, " %-8s %pF\n",
4710 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004711 }
4712
4713 return 0;
4714}
4715
4716static const struct seq_operations ptype_seq_ops = {
4717 .start = ptype_seq_start,
4718 .next = ptype_seq_next,
4719 .stop = ptype_seq_stop,
4720 .show = ptype_seq_show,
4721};
4722
4723static int ptype_seq_open(struct inode *inode, struct file *file)
4724{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004725 return seq_open_net(inode, file, &ptype_seq_ops,
4726 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004727}
4728
4729static const struct file_operations ptype_seq_fops = {
4730 .owner = THIS_MODULE,
4731 .open = ptype_seq_open,
4732 .read = seq_read,
4733 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004734 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004735};
4736
4737
Pavel Emelyanov46650792007-10-08 20:38:39 -07004738static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739{
4740 int rc = -ENOMEM;
4741
Eric W. Biederman881d9662007-09-17 11:56:21 -07004742 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004744 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004746 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004747 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004748
Eric W. Biederman881d9662007-09-17 11:56:21 -07004749 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004750 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751 rc = 0;
4752out:
4753 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004754out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004755 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004757 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004759 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 goto out;
4761}
Eric W. Biederman881d9662007-09-17 11:56:21 -07004762
Pavel Emelyanov46650792007-10-08 20:38:39 -07004763static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004764{
4765 wext_proc_exit(net);
4766
4767 proc_net_remove(net, "ptype");
4768 proc_net_remove(net, "softnet_stat");
4769 proc_net_remove(net, "dev");
4770}
4771
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004772static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004773 .init = dev_proc_net_init,
4774 .exit = dev_proc_net_exit,
4775};
4776
4777static int __init dev_proc_init(void)
4778{
4779 return register_pernet_subsys(&dev_proc_ops);
4780}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781#else
4782#define dev_proc_init() 0
4783#endif /* CONFIG_PROC_FS */
4784
4785
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004786struct netdev_upper {
4787 struct net_device *dev;
4788 bool master;
4789 struct list_head list;
4790 struct rcu_head rcu;
4791 struct list_head search_list;
4792};
4793
4794static void __append_search_uppers(struct list_head *search_list,
4795 struct net_device *dev)
4796{
4797 struct netdev_upper *upper;
4798
4799 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4800 /* check if this upper is not already in search list */
4801 if (list_empty(&upper->search_list))
4802 list_add_tail(&upper->search_list, search_list);
4803 }
4804}
4805
4806static bool __netdev_search_upper_dev(struct net_device *dev,
4807 struct net_device *upper_dev)
4808{
4809 LIST_HEAD(search_list);
4810 struct netdev_upper *upper;
4811 struct netdev_upper *tmp;
4812 bool ret = false;
4813
4814 __append_search_uppers(&search_list, dev);
4815 list_for_each_entry(upper, &search_list, search_list) {
4816 if (upper->dev == upper_dev) {
4817 ret = true;
4818 break;
4819 }
4820 __append_search_uppers(&search_list, upper->dev);
4821 }
4822 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4823 INIT_LIST_HEAD(&upper->search_list);
4824 return ret;
4825}
4826
4827static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4828 struct net_device *upper_dev)
4829{
4830 struct netdev_upper *upper;
4831
4832 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4833 if (upper->dev == upper_dev)
4834 return upper;
4835 }
4836 return NULL;
4837}
4838
4839/**
4840 * netdev_has_upper_dev - Check if device is linked to an upper device
4841 * @dev: device
4842 * @upper_dev: upper device to check
4843 *
4844 * Find out if a device is linked to specified upper device and return true
4845 * in case it is. Note that this checks only immediate upper device,
4846 * not through a complete stack of devices. The caller must hold the RTNL lock.
4847 */
4848bool netdev_has_upper_dev(struct net_device *dev,
4849 struct net_device *upper_dev)
4850{
4851 ASSERT_RTNL();
4852
4853 return __netdev_find_upper(dev, upper_dev);
4854}
4855EXPORT_SYMBOL(netdev_has_upper_dev);
4856
4857/**
4858 * netdev_has_any_upper_dev - Check if device is linked to some device
4859 * @dev: device
4860 *
4861 * Find out if a device is linked to an upper device and return true in case
4862 * it is. The caller must hold the RTNL lock.
4863 */
4864bool netdev_has_any_upper_dev(struct net_device *dev)
4865{
4866 ASSERT_RTNL();
4867
4868 return !list_empty(&dev->upper_dev_list);
4869}
4870EXPORT_SYMBOL(netdev_has_any_upper_dev);
4871
4872/**
4873 * netdev_master_upper_dev_get - Get master upper device
4874 * @dev: device
4875 *
4876 * Find a master upper device and return pointer to it or NULL in case
4877 * it's not there. The caller must hold the RTNL lock.
4878 */
4879struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4880{
4881 struct netdev_upper *upper;
4882
4883 ASSERT_RTNL();
4884
4885 if (list_empty(&dev->upper_dev_list))
4886 return NULL;
4887
4888 upper = list_first_entry(&dev->upper_dev_list,
4889 struct netdev_upper, list);
4890 if (likely(upper->master))
4891 return upper->dev;
4892 return NULL;
4893}
4894EXPORT_SYMBOL(netdev_master_upper_dev_get);
4895
4896/**
4897 * netdev_master_upper_dev_get_rcu - Get master upper device
4898 * @dev: device
4899 *
4900 * Find a master upper device and return pointer to it or NULL in case
4901 * it's not there. The caller must hold the RCU read lock.
4902 */
4903struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4904{
4905 struct netdev_upper *upper;
4906
4907 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4908 struct netdev_upper, list);
4909 if (upper && likely(upper->master))
4910 return upper->dev;
4911 return NULL;
4912}
4913EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4914
4915static int __netdev_upper_dev_link(struct net_device *dev,
4916 struct net_device *upper_dev, bool master)
4917{
4918 struct netdev_upper *upper;
4919
4920 ASSERT_RTNL();
4921
4922 if (dev == upper_dev)
4923 return -EBUSY;
4924
4925 /* To prevent loops, check if dev is not upper device to upper_dev. */
4926 if (__netdev_search_upper_dev(upper_dev, dev))
4927 return -EBUSY;
4928
4929 if (__netdev_find_upper(dev, upper_dev))
4930 return -EEXIST;
4931
4932 if (master && netdev_master_upper_dev_get(dev))
4933 return -EBUSY;
4934
4935 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4936 if (!upper)
4937 return -ENOMEM;
4938
4939 upper->dev = upper_dev;
4940 upper->master = master;
4941 INIT_LIST_HEAD(&upper->search_list);
4942
4943 /* Ensure that master upper link is always the first item in list. */
4944 if (master)
4945 list_add_rcu(&upper->list, &dev->upper_dev_list);
4946 else
4947 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4948 dev_hold(upper_dev);
4949
4950 return 0;
4951}
4952
4953/**
4954 * netdev_upper_dev_link - Add a link to the upper device
4955 * @dev: device
4956 * @upper_dev: new upper device
4957 *
4958 * Adds a link to device which is upper to this one. The caller must hold
4959 * the RTNL lock. On a failure a negative errno code is returned.
4960 * On success the reference counts are adjusted and the function
4961 * returns zero.
4962 */
4963int netdev_upper_dev_link(struct net_device *dev,
4964 struct net_device *upper_dev)
4965{
4966 return __netdev_upper_dev_link(dev, upper_dev, false);
4967}
4968EXPORT_SYMBOL(netdev_upper_dev_link);
4969
4970/**
4971 * netdev_master_upper_dev_link - Add a master link to the upper device
4972 * @dev: device
4973 * @upper_dev: new upper device
4974 *
4975 * Adds a link to device which is upper to this one. In this case, only
4976 * one master upper device can be linked, although other non-master devices
4977 * might be linked as well. The caller must hold the RTNL lock.
4978 * On a failure a negative errno code is returned. On success the reference
4979 * counts are adjusted and the function returns zero.
4980 */
4981int netdev_master_upper_dev_link(struct net_device *dev,
4982 struct net_device *upper_dev)
4983{
4984 return __netdev_upper_dev_link(dev, upper_dev, true);
4985}
4986EXPORT_SYMBOL(netdev_master_upper_dev_link);
4987
4988/**
4989 * netdev_upper_dev_unlink - Removes a link to upper device
4990 * @dev: device
4991 * @upper_dev: new upper device
4992 *
4993 * Removes a link to device which is upper to this one. The caller must hold
4994 * the RTNL lock.
4995 */
4996void netdev_upper_dev_unlink(struct net_device *dev,
4997 struct net_device *upper_dev)
4998{
4999 struct netdev_upper *upper;
5000
5001 ASSERT_RTNL();
5002
5003 upper = __netdev_find_upper(dev, upper_dev);
5004 if (!upper)
5005 return;
5006 list_del_rcu(&upper->list);
5007 dev_put(upper_dev);
5008 kfree_rcu(upper, rcu);
5009}
5010EXPORT_SYMBOL(netdev_upper_dev_unlink);
5011
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005012static void dev_change_rx_flags(struct net_device *dev, int flags)
5013{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005014 const struct net_device_ops *ops = dev->netdev_ops;
5015
5016 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
5017 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005018}
5019
Wang Chendad9b332008-06-18 01:48:28 -07005020static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07005021{
Eric Dumazetb536db92011-11-30 21:42:26 +00005022 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005023 kuid_t uid;
5024 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005025
Patrick McHardy24023452007-07-14 18:51:31 -07005026 ASSERT_RTNL();
5027
Wang Chendad9b332008-06-18 01:48:28 -07005028 dev->flags |= IFF_PROMISC;
5029 dev->promiscuity += inc;
5030 if (dev->promiscuity == 0) {
5031 /*
5032 * Avoid overflow.
5033 * If inc causes overflow, untouch promisc and return error.
5034 */
5035 if (inc < 0)
5036 dev->flags &= ~IFF_PROMISC;
5037 else {
5038 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005039 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5040 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005041 return -EOVERFLOW;
5042 }
5043 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005044 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005045 pr_info("device %s %s promiscuous mode\n",
5046 dev->name,
5047 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005048 if (audit_enabled) {
5049 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005050 audit_log(current->audit_context, GFP_ATOMIC,
5051 AUDIT_ANOM_PROMISCUOUS,
5052 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5053 dev->name, (dev->flags & IFF_PROMISC),
5054 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005055 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005056 from_kuid(&init_user_ns, uid),
5057 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005058 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005059 }
Patrick McHardy24023452007-07-14 18:51:31 -07005060
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005061 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005062 }
Wang Chendad9b332008-06-18 01:48:28 -07005063 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005064}
5065
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066/**
5067 * dev_set_promiscuity - update promiscuity count on a device
5068 * @dev: device
5069 * @inc: modifier
5070 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005071 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 * remains above zero the interface remains promiscuous. Once it hits zero
5073 * the device reverts back to normal filtering operation. A negative inc
5074 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005075 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076 */
Wang Chendad9b332008-06-18 01:48:28 -07005077int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078{
Eric Dumazetb536db92011-11-30 21:42:26 +00005079 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005080 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081
Wang Chendad9b332008-06-18 01:48:28 -07005082 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005083 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005084 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005085 if (dev->flags != old_flags)
5086 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005087 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005089EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090
5091/**
5092 * dev_set_allmulti - update allmulti count on a device
5093 * @dev: device
5094 * @inc: modifier
5095 *
5096 * Add or remove reception of all multicast frames to a device. While the
5097 * count in the device remains above zero the interface remains listening
5098 * to all interfaces. Once it hits zero the device reverts back to normal
5099 * filtering operation. A negative @inc value is used to drop the counter
5100 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07005101 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102 */
5103
Wang Chendad9b332008-06-18 01:48:28 -07005104int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105{
Eric Dumazetb536db92011-11-30 21:42:26 +00005106 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107
Patrick McHardy24023452007-07-14 18:51:31 -07005108 ASSERT_RTNL();
5109
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005111 dev->allmulti += inc;
5112 if (dev->allmulti == 0) {
5113 /*
5114 * Avoid overflow.
5115 * If inc causes overflow, untouch allmulti and return error.
5116 */
5117 if (inc < 0)
5118 dev->flags &= ~IFF_ALLMULTI;
5119 else {
5120 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005121 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5122 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005123 return -EOVERFLOW;
5124 }
5125 }
Patrick McHardy24023452007-07-14 18:51:31 -07005126 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005127 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005128 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07005129 }
Wang Chendad9b332008-06-18 01:48:28 -07005130 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005131}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005132EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07005133
5134/*
5135 * Upload unicast and multicast address lists to device and
5136 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005137 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005138 * are present.
5139 */
5140void __dev_set_rx_mode(struct net_device *dev)
5141{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005142 const struct net_device_ops *ops = dev->netdev_ops;
5143
Patrick McHardy4417da62007-06-27 01:28:10 -07005144 /* dev_open will call this function so the list will stay sane. */
5145 if (!(dev->flags&IFF_UP))
5146 return;
5147
5148 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005149 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005150
Jiri Pirko01789342011-08-16 06:29:00 +00005151 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005152 /* Unicast addresses changes may only happen under the rtnl,
5153 * therefore calling __dev_set_promiscuity here is safe.
5154 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005155 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005156 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07005157 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005158 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005159 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07005160 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005161 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005162 }
Jiri Pirko01789342011-08-16 06:29:00 +00005163
5164 if (ops->ndo_set_rx_mode)
5165 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005166}
5167
5168void dev_set_rx_mode(struct net_device *dev)
5169{
David S. Millerb9e40852008-07-15 00:15:08 -07005170 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005171 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005172 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173}
5174
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005175/**
5176 * dev_get_flags - get flags reported to userspace
5177 * @dev: device
5178 *
5179 * Get the combination of flag bits exported through APIs to userspace.
5180 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005181unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182{
Eric Dumazet95c96172012-04-15 05:58:06 +00005183 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184
5185 flags = (dev->flags & ~(IFF_PROMISC |
5186 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005187 IFF_RUNNING |
5188 IFF_LOWER_UP |
5189 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 (dev->gflags & (IFF_PROMISC |
5191 IFF_ALLMULTI));
5192
Stefan Rompfb00055a2006-03-20 17:09:11 -08005193 if (netif_running(dev)) {
5194 if (netif_oper_up(dev))
5195 flags |= IFF_RUNNING;
5196 if (netif_carrier_ok(dev))
5197 flags |= IFF_LOWER_UP;
5198 if (netif_dormant(dev))
5199 flags |= IFF_DORMANT;
5200 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201
5202 return flags;
5203}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005204EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205
Patrick McHardybd380812010-02-26 06:34:53 +00005206int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207{
Eric Dumazetb536db92011-11-30 21:42:26 +00005208 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005209 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210
Patrick McHardy24023452007-07-14 18:51:31 -07005211 ASSERT_RTNL();
5212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 /*
5214 * Set the flags on our device.
5215 */
5216
5217 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5218 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5219 IFF_AUTOMEDIA)) |
5220 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5221 IFF_ALLMULTI));
5222
5223 /*
5224 * Load in the correct multicast list now the flags have changed.
5225 */
5226
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005227 if ((old_flags ^ flags) & IFF_MULTICAST)
5228 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005229
Patrick McHardy4417da62007-06-27 01:28:10 -07005230 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231
5232 /*
5233 * Have we downed the interface. We handle IFF_UP ourselves
5234 * according to user attempts to set it, rather than blindly
5235 * setting it.
5236 */
5237
5238 ret = 0;
5239 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005240 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241
5242 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005243 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 }
5245
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005247 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5248
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249 dev->gflags ^= IFF_PROMISC;
5250 dev_set_promiscuity(dev, inc);
5251 }
5252
5253 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5254 is important. Some (broken) drivers set IFF_PROMISC, when
5255 IFF_ALLMULTI is requested not asking us and not reporting.
5256 */
5257 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005258 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5259
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260 dev->gflags ^= IFF_ALLMULTI;
5261 dev_set_allmulti(dev, inc);
5262 }
5263
Patrick McHardybd380812010-02-26 06:34:53 +00005264 return ret;
5265}
5266
5267void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
5268{
5269 unsigned int changes = dev->flags ^ old_flags;
5270
5271 if (changes & IFF_UP) {
5272 if (dev->flags & IFF_UP)
5273 call_netdevice_notifiers(NETDEV_UP, dev);
5274 else
5275 call_netdevice_notifiers(NETDEV_DOWN, dev);
5276 }
5277
5278 if (dev->flags & IFF_UP &&
5279 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
5280 call_netdevice_notifiers(NETDEV_CHANGE, dev);
5281}
5282
5283/**
5284 * dev_change_flags - change device settings
5285 * @dev: device
5286 * @flags: device state flags
5287 *
5288 * Change settings on device based state flags. The flags are
5289 * in the userspace exported format.
5290 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005291int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005292{
Eric Dumazetb536db92011-11-30 21:42:26 +00005293 int ret;
5294 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005295
5296 ret = __dev_change_flags(dev, flags);
5297 if (ret < 0)
5298 return ret;
5299
5300 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07005301 if (changes)
5302 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005303
Patrick McHardybd380812010-02-26 06:34:53 +00005304 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 return ret;
5306}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005307EXPORT_SYMBOL(dev_change_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005309/**
5310 * dev_set_mtu - Change maximum transfer unit
5311 * @dev: device
5312 * @new_mtu: new transfer unit
5313 *
5314 * Change the maximum transfer size of the network device.
5315 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316int dev_set_mtu(struct net_device *dev, int new_mtu)
5317{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005318 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319 int err;
5320
5321 if (new_mtu == dev->mtu)
5322 return 0;
5323
5324 /* MTU must be positive. */
5325 if (new_mtu < 0)
5326 return -EINVAL;
5327
5328 if (!netif_device_present(dev))
5329 return -ENODEV;
5330
5331 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005332 if (ops->ndo_change_mtu)
5333 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334 else
5335 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005336
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005337 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005338 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339 return err;
5340}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005341EXPORT_SYMBOL(dev_set_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005343/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005344 * dev_set_group - Change group this device belongs to
5345 * @dev: device
5346 * @new_group: group this device should belong to
5347 */
5348void dev_set_group(struct net_device *dev, int new_group)
5349{
5350 dev->group = new_group;
5351}
5352EXPORT_SYMBOL(dev_set_group);
5353
5354/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005355 * dev_set_mac_address - Change Media Access Control Address
5356 * @dev: device
5357 * @sa: new address
5358 *
5359 * Change the hardware (MAC) address of the device
5360 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5362{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005363 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364 int err;
5365
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005366 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367 return -EOPNOTSUPP;
5368 if (sa->sa_family != dev->type)
5369 return -EINVAL;
5370 if (!netif_device_present(dev))
5371 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005372 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005373 if (err)
5374 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005375 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005376 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005377 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005378 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005380EXPORT_SYMBOL(dev_set_mac_address);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005382/**
5383 * dev_change_carrier - Change device carrier
5384 * @dev: device
5385 * @new_carries: new value
5386 *
5387 * Change device carrier
5388 */
5389int dev_change_carrier(struct net_device *dev, bool new_carrier)
5390{
5391 const struct net_device_ops *ops = dev->netdev_ops;
5392
5393 if (!ops->ndo_change_carrier)
5394 return -EOPNOTSUPP;
5395 if (!netif_device_present(dev))
5396 return -ENODEV;
5397 return ops->ndo_change_carrier(dev, new_carrier);
5398}
5399EXPORT_SYMBOL(dev_change_carrier);
5400
Linus Torvalds1da177e2005-04-16 15:20:36 -07005401/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00005402 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07005404static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405{
5406 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00005407 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408
5409 if (!dev)
5410 return -ENODEV;
5411
5412 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005413 case SIOCGIFFLAGS: /* Get interface flags */
5414 ifr->ifr_flags = (short) dev_get_flags(dev);
5415 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005417 case SIOCGIFMETRIC: /* Get the metric on the interface
5418 (currently unused) */
5419 ifr->ifr_metric = 0;
5420 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005421
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005422 case SIOCGIFMTU: /* Get the MTU of a device */
5423 ifr->ifr_mtu = dev->mtu;
5424 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005426 case SIOCGIFHWADDR:
5427 if (!dev->addr_len)
5428 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5429 else
5430 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5431 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5432 ifr->ifr_hwaddr.sa_family = dev->type;
5433 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005434
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005435 case SIOCGIFSLAVE:
5436 err = -EINVAL;
5437 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005438
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005439 case SIOCGIFMAP:
5440 ifr->ifr_map.mem_start = dev->mem_start;
5441 ifr->ifr_map.mem_end = dev->mem_end;
5442 ifr->ifr_map.base_addr = dev->base_addr;
5443 ifr->ifr_map.irq = dev->irq;
5444 ifr->ifr_map.dma = dev->dma;
5445 ifr->ifr_map.port = dev->if_port;
5446 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005447
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005448 case SIOCGIFINDEX:
5449 ifr->ifr_ifindex = dev->ifindex;
5450 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005451
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005452 case SIOCGIFTXQLEN:
5453 ifr->ifr_qlen = dev->tx_queue_len;
5454 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005455
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005456 default:
5457 /* dev_ioctl() should ensure this case
5458 * is never reached
5459 */
5460 WARN_ON(1);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005461 err = -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005462 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005463
5464 }
5465 return err;
5466}
5467
5468/*
5469 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5470 */
5471static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5472{
5473 int err;
5474 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005475 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005476
5477 if (!dev)
5478 return -ENODEV;
5479
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005480 ops = dev->netdev_ops;
5481
Jeff Garzik14e3e072007-10-08 00:06:32 -07005482 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005483 case SIOCSIFFLAGS: /* Set interface flags */
5484 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005485
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005486 case SIOCSIFMETRIC: /* Set the metric on the interface
5487 (currently unused) */
5488 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005489
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005490 case SIOCSIFMTU: /* Set the MTU of a device */
5491 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005492
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005493 case SIOCSIFHWADDR:
5494 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005496 case SIOCSIFHWBROADCAST:
5497 if (ifr->ifr_hwaddr.sa_family != dev->type)
5498 return -EINVAL;
5499 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5500 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5501 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5502 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005504 case SIOCSIFMAP:
5505 if (ops->ndo_set_config) {
5506 if (!netif_device_present(dev))
5507 return -ENODEV;
5508 return ops->ndo_set_config(dev, &ifr->ifr_map);
5509 }
5510 return -EOPNOTSUPP;
5511
5512 case SIOCADDMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005513 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005514 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5515 return -EINVAL;
5516 if (!netif_device_present(dev))
5517 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005518 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005519
5520 case SIOCDELMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005521 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005522 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5523 return -EINVAL;
5524 if (!netif_device_present(dev))
5525 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005526 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005527
5528 case SIOCSIFTXQLEN:
5529 if (ifr->ifr_qlen < 0)
5530 return -EINVAL;
5531 dev->tx_queue_len = ifr->ifr_qlen;
5532 return 0;
5533
5534 case SIOCSIFNAME:
5535 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5536 return dev_change_name(dev, ifr->ifr_newname);
5537
Richard Cochran4dc360c2011-10-19 17:00:35 -04005538 case SIOCSHWTSTAMP:
5539 err = net_hwtstamp_validate(ifr);
5540 if (err)
5541 return err;
5542 /* fall through */
5543
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005544 /*
5545 * Unknown or private ioctl
5546 */
5547 default:
5548 if ((cmd >= SIOCDEVPRIVATE &&
5549 cmd <= SIOCDEVPRIVATE + 15) ||
5550 cmd == SIOCBONDENSLAVE ||
5551 cmd == SIOCBONDRELEASE ||
5552 cmd == SIOCBONDSETHWADDR ||
5553 cmd == SIOCBONDSLAVEINFOQUERY ||
5554 cmd == SIOCBONDINFOQUERY ||
5555 cmd == SIOCBONDCHANGEACTIVE ||
5556 cmd == SIOCGMIIPHY ||
5557 cmd == SIOCGMIIREG ||
5558 cmd == SIOCSMIIREG ||
5559 cmd == SIOCBRADDIF ||
5560 cmd == SIOCBRDELIF ||
5561 cmd == SIOCSHWTSTAMP ||
5562 cmd == SIOCWANDEV) {
5563 err = -EOPNOTSUPP;
5564 if (ops->ndo_do_ioctl) {
5565 if (netif_device_present(dev))
5566 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5567 else
5568 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005570 } else
5571 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005572
5573 }
5574 return err;
5575}
5576
5577/*
5578 * This function handles all "interface"-type I/O control requests. The actual
5579 * 'doing' part of this is dev_ifsioc above.
5580 */
5581
5582/**
5583 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005584 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005585 * @cmd: command to issue
5586 * @arg: pointer to a struct ifreq in user space
5587 *
5588 * Issue ioctl functions to devices. This is normally called by the
5589 * user space syscall interfaces but can sometimes be useful for
5590 * other purposes. The return value is the return from the syscall if
5591 * positive or a negative errno code on error.
5592 */
5593
Eric W. Biederman881d9662007-09-17 11:56:21 -07005594int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005595{
5596 struct ifreq ifr;
5597 int ret;
5598 char *colon;
5599
5600 /* One special case: SIOCGIFCONF takes ifconf argument
5601 and requires shared lock, because it sleeps writing
5602 to user space.
5603 */
5604
5605 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005606 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005607 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005608 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005609 return ret;
5610 }
5611 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005612 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613
5614 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5615 return -EFAULT;
5616
5617 ifr.ifr_name[IFNAMSIZ-1] = 0;
5618
5619 colon = strchr(ifr.ifr_name, ':');
5620 if (colon)
5621 *colon = 0;
5622
5623 /*
5624 * See which interface the caller is talking about.
5625 */
5626
5627 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005628 /*
5629 * These ioctl calls:
5630 * - can be done by all.
5631 * - atomic and do not require locking.
5632 * - return a value
5633 */
5634 case SIOCGIFFLAGS:
5635 case SIOCGIFMETRIC:
5636 case SIOCGIFMTU:
5637 case SIOCGIFHWADDR:
5638 case SIOCGIFSLAVE:
5639 case SIOCGIFMAP:
5640 case SIOCGIFINDEX:
5641 case SIOCGIFTXQLEN:
5642 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005643 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005644 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005645 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005646 if (!ret) {
5647 if (colon)
5648 *colon = ':';
5649 if (copy_to_user(arg, &ifr,
5650 sizeof(struct ifreq)))
5651 ret = -EFAULT;
5652 }
5653 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005655 case SIOCETHTOOL:
5656 dev_load(net, ifr.ifr_name);
5657 rtnl_lock();
5658 ret = dev_ethtool(net, &ifr);
5659 rtnl_unlock();
5660 if (!ret) {
5661 if (colon)
5662 *colon = ':';
5663 if (copy_to_user(arg, &ifr,
5664 sizeof(struct ifreq)))
5665 ret = -EFAULT;
5666 }
5667 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005669 /*
5670 * These ioctl calls:
5671 * - require superuser power.
5672 * - require strict serialization.
5673 * - return a value
5674 */
5675 case SIOCGMIIPHY:
5676 case SIOCGMIIREG:
5677 case SIOCSIFNAME:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005678 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005679 return -EPERM;
5680 dev_load(net, ifr.ifr_name);
5681 rtnl_lock();
5682 ret = dev_ifsioc(net, &ifr, cmd);
5683 rtnl_unlock();
5684 if (!ret) {
5685 if (colon)
5686 *colon = ':';
5687 if (copy_to_user(arg, &ifr,
5688 sizeof(struct ifreq)))
5689 ret = -EFAULT;
5690 }
5691 return ret;
5692
5693 /*
5694 * These ioctl calls:
5695 * - require superuser power.
5696 * - require strict serialization.
5697 * - do not return a value
5698 */
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005699 case SIOCSIFMAP:
5700 case SIOCSIFTXQLEN:
5701 if (!capable(CAP_NET_ADMIN))
5702 return -EPERM;
5703 /* fall through */
5704 /*
5705 * These ioctl calls:
5706 * - require local superuser power.
5707 * - require strict serialization.
5708 * - do not return a value
5709 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005710 case SIOCSIFFLAGS:
5711 case SIOCSIFMETRIC:
5712 case SIOCSIFMTU:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005713 case SIOCSIFHWADDR:
5714 case SIOCSIFSLAVE:
5715 case SIOCADDMULTI:
5716 case SIOCDELMULTI:
5717 case SIOCSIFHWBROADCAST:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005718 case SIOCSMIIREG:
5719 case SIOCBONDENSLAVE:
5720 case SIOCBONDRELEASE:
5721 case SIOCBONDSETHWADDR:
5722 case SIOCBONDCHANGEACTIVE:
5723 case SIOCBRADDIF:
5724 case SIOCBRDELIF:
5725 case SIOCSHWTSTAMP:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005726 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005727 return -EPERM;
5728 /* fall through */
5729 case SIOCBONDSLAVEINFOQUERY:
5730 case SIOCBONDINFOQUERY:
5731 dev_load(net, ifr.ifr_name);
5732 rtnl_lock();
5733 ret = dev_ifsioc(net, &ifr, cmd);
5734 rtnl_unlock();
5735 return ret;
5736
5737 case SIOCGIFMEM:
5738 /* Get the per device memory space. We can add this but
5739 * currently do not support it */
5740 case SIOCSIFMEM:
5741 /* Set the per device memory buffer space.
5742 * Not applicable in our case */
5743 case SIOCSIFLINK:
Lifeng Sun41c31f32011-04-27 22:04:51 +00005744 return -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005745
5746 /*
5747 * Unknown or private ioctl.
5748 */
5749 default:
5750 if (cmd == SIOCWANDEV ||
5751 (cmd >= SIOCDEVPRIVATE &&
5752 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005753 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005754 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005755 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005756 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005757 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005758 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005759 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005760 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005761 }
5762 /* Take care of Wireless Extensions */
5763 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5764 return wext_handle_ioctl(net, &ifr, cmd, arg);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005765 return -ENOTTY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766 }
5767}
5768
5769
5770/**
5771 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005772 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005773 *
5774 * Returns a suitable unique value for a new device interface
5775 * number. The caller must hold the rtnl semaphore or the
5776 * dev_base_lock to be sure it remains unique.
5777 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005778static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005779{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005780 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005781 for (;;) {
5782 if (++ifindex <= 0)
5783 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005784 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005785 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005786 }
5787}
5788
Linus Torvalds1da177e2005-04-16 15:20:36 -07005789/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005790static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005792static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005794 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795}
5796
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005797static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005798{
Krishna Kumare93737b2009-12-08 22:26:02 +00005799 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005800
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005801 BUG_ON(dev_boot_phase);
5802 ASSERT_RTNL();
5803
Krishna Kumare93737b2009-12-08 22:26:02 +00005804 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005805 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00005806 * for initialization unwind. Remove those
5807 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005808 */
5809 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005810 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5811 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005812
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005813 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005814 list_del(&dev->unreg_list);
5815 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005816 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005817 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005818 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005819 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005820
Octavian Purdila44345722010-12-13 12:44:07 +00005821 /* If device is running, close it first. */
5822 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005823
Octavian Purdila44345722010-12-13 12:44:07 +00005824 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005825 /* And unlink it from device chain. */
5826 unlist_netdevice(dev);
5827
5828 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005829 }
5830
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005831 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005832
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005833 list_for_each_entry(dev, head, unreg_list) {
5834 /* Shutdown queueing discipline. */
5835 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005836
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005837
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005838 /* Notify protocols, that we are about to destroy
5839 this device. They should clean all the things.
5840 */
5841 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5842
Patrick McHardya2835762010-02-26 06:34:51 +00005843 if (!dev->rtnl_link_ops ||
5844 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5845 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5846
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005847 /*
5848 * Flush the unicast and multicast chains
5849 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005850 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005851 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005852
5853 if (dev->netdev_ops->ndo_uninit)
5854 dev->netdev_ops->ndo_uninit(dev);
5855
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005856 /* Notifier chain MUST detach us all upper devices. */
5857 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005858
5859 /* Remove entries from kobject tree */
5860 netdev_unregister_kobject(dev);
5861 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005862
Eric W. Biederman850a5452011-10-13 22:25:23 +00005863 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005864
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005865 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005866 dev_put(dev);
5867}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005868
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005869static void rollback_registered(struct net_device *dev)
5870{
5871 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005872
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005873 list_add(&dev->unreg_list, &single);
5874 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005875 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005876}
5877
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005878static netdev_features_t netdev_fix_features(struct net_device *dev,
5879 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005880{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005881 /* Fix illegal checksum combinations */
5882 if ((features & NETIF_F_HW_CSUM) &&
5883 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005884 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005885 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5886 }
5887
Herbert Xub63365a2008-10-23 01:11:29 -07005888 /* Fix illegal SG+CSUM combinations. */
5889 if ((features & NETIF_F_SG) &&
5890 !(features & NETIF_F_ALL_CSUM)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005891 netdev_dbg(dev,
5892 "Dropping NETIF_F_SG since no checksum feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005893 features &= ~NETIF_F_SG;
5894 }
5895
5896 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005897 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005898 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005899 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005900 }
5901
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005902 /* TSO ECN requires that TSO is present as well. */
5903 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5904 features &= ~NETIF_F_TSO_ECN;
5905
Michał Mirosław212b5732011-02-15 16:59:16 +00005906 /* Software GSO depends on SG. */
5907 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005908 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005909 features &= ~NETIF_F_GSO;
5910 }
5911
Michał Mirosławacd11302011-01-24 15:45:15 -08005912 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005913 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005914 /* maybe split UFO into V4 and V6? */
5915 if (!((features & NETIF_F_GEN_CSUM) ||
5916 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5917 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005918 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005919 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005920 features &= ~NETIF_F_UFO;
5921 }
5922
5923 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005924 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005925 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005926 features &= ~NETIF_F_UFO;
5927 }
5928 }
5929
5930 return features;
5931}
Herbert Xub63365a2008-10-23 01:11:29 -07005932
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005933int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005934{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005935 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005936 int err = 0;
5937
Michał Mirosław87267482011-04-12 09:56:38 +00005938 ASSERT_RTNL();
5939
Michał Mirosław5455c692011-02-15 16:59:17 +00005940 features = netdev_get_wanted_features(dev);
5941
5942 if (dev->netdev_ops->ndo_fix_features)
5943 features = dev->netdev_ops->ndo_fix_features(dev, features);
5944
5945 /* driver might be less strict about feature dependencies */
5946 features = netdev_fix_features(dev, features);
5947
5948 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005949 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005950
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005951 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5952 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005953
5954 if (dev->netdev_ops->ndo_set_features)
5955 err = dev->netdev_ops->ndo_set_features(dev, features);
5956
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005957 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005958 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005959 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5960 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005961 return -1;
5962 }
5963
5964 if (!err)
5965 dev->features = features;
5966
5967 return 1;
5968}
5969
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005970/**
5971 * netdev_update_features - recalculate device features
5972 * @dev: the device to check
5973 *
5974 * Recalculate dev->features set and send notifications if it
5975 * has changed. Should be called after driver or hardware dependent
5976 * conditions might have changed that influence the features.
5977 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005978void netdev_update_features(struct net_device *dev)
5979{
5980 if (__netdev_update_features(dev))
5981 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005982}
5983EXPORT_SYMBOL(netdev_update_features);
5984
Linus Torvalds1da177e2005-04-16 15:20:36 -07005985/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005986 * netdev_change_features - recalculate device features
5987 * @dev: the device to check
5988 *
5989 * Recalculate dev->features set and send notifications even
5990 * if they have not changed. Should be called instead of
5991 * netdev_update_features() if also dev->vlan_features might
5992 * have changed to allow the changes to be propagated to stacked
5993 * VLAN devices.
5994 */
5995void netdev_change_features(struct net_device *dev)
5996{
5997 __netdev_update_features(dev);
5998 netdev_features_change(dev);
5999}
6000EXPORT_SYMBOL(netdev_change_features);
6001
6002/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006003 * netif_stacked_transfer_operstate - transfer operstate
6004 * @rootdev: the root or lower level device to transfer state from
6005 * @dev: the device to transfer operstate to
6006 *
6007 * Transfer operational state from root to device. This is normally
6008 * called when a stacking relationship exists between the root
6009 * device and the device(a leaf device).
6010 */
6011void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6012 struct net_device *dev)
6013{
6014 if (rootdev->operstate == IF_OPER_DORMANT)
6015 netif_dormant_on(dev);
6016 else
6017 netif_dormant_off(dev);
6018
6019 if (netif_carrier_ok(rootdev)) {
6020 if (!netif_carrier_ok(dev))
6021 netif_carrier_on(dev);
6022 } else {
6023 if (netif_carrier_ok(dev))
6024 netif_carrier_off(dev);
6025 }
6026}
6027EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6028
Tom Herbertbf264142010-11-26 08:36:09 +00006029#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006030static int netif_alloc_rx_queues(struct net_device *dev)
6031{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006032 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006033 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006034
Tom Herbertbd25fa72010-10-18 18:00:16 +00006035 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006036
Tom Herbertbd25fa72010-10-18 18:00:16 +00006037 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6038 if (!rx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006039 pr_err("netdev: Unable to allocate %u rx queues\n", count);
Tom Herbertbd25fa72010-10-18 18:00:16 +00006040 return -ENOMEM;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006041 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006042 dev->_rx = rx;
6043
Tom Herbertbd25fa72010-10-18 18:00:16 +00006044 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006045 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006046 return 0;
6047}
Tom Herbertbf264142010-11-26 08:36:09 +00006048#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006049
Changli Gaoaa942102010-12-04 02:31:41 +00006050static void netdev_init_one_queue(struct net_device *dev,
6051 struct netdev_queue *queue, void *_unused)
6052{
6053 /* Initialize queue lock */
6054 spin_lock_init(&queue->_xmit_lock);
6055 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6056 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006057 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006058 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006059#ifdef CONFIG_BQL
6060 dql_init(&queue->dql, HZ);
6061#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006062}
6063
Tom Herberte6484932010-10-18 18:04:39 +00006064static int netif_alloc_netdev_queues(struct net_device *dev)
6065{
6066 unsigned int count = dev->num_tx_queues;
6067 struct netdev_queue *tx;
6068
6069 BUG_ON(count < 1);
6070
6071 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
6072 if (!tx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006073 pr_err("netdev: Unable to allocate %u tx queues\n", count);
Tom Herberte6484932010-10-18 18:04:39 +00006074 return -ENOMEM;
6075 }
6076 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006077
Tom Herberte6484932010-10-18 18:04:39 +00006078 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6079 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006080
6081 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006082}
6083
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006084/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006085 * register_netdevice - register a network device
6086 * @dev: device to register
6087 *
6088 * Take a completed network device structure and add it to the kernel
6089 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6090 * chain. 0 is returned on success. A negative errno code is returned
6091 * on a failure to set up the device, or if the name is a duplicate.
6092 *
6093 * Callers must hold the rtnl semaphore. You may want
6094 * register_netdev() instead of this.
6095 *
6096 * BUGS:
6097 * The locking appears insufficient to guarantee two parallel registers
6098 * will not get the same name.
6099 */
6100
6101int register_netdevice(struct net_device *dev)
6102{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006103 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006104 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006105
6106 BUG_ON(dev_boot_phase);
6107 ASSERT_RTNL();
6108
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006109 might_sleep();
6110
Linus Torvalds1da177e2005-04-16 15:20:36 -07006111 /* When net_device's are persistent, this will be fatal. */
6112 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006113 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006114
David S. Millerf1f28aa2008-07-15 00:08:33 -07006115 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006116 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006117
Linus Torvalds1da177e2005-04-16 15:20:36 -07006118 dev->iflink = -1;
6119
Gao feng828de4f2012-09-13 20:58:27 +00006120 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006121 if (ret < 0)
6122 goto out;
6123
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006125 if (dev->netdev_ops->ndo_init) {
6126 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127 if (ret) {
6128 if (ret > 0)
6129 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006130 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006131 }
6132 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006133
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006134 ret = -EBUSY;
6135 if (!dev->ifindex)
6136 dev->ifindex = dev_new_index(net);
6137 else if (__dev_get_by_index(net, dev->ifindex))
6138 goto err_uninit;
6139
Linus Torvalds1da177e2005-04-16 15:20:36 -07006140 if (dev->iflink == -1)
6141 dev->iflink = dev->ifindex;
6142
Michał Mirosław5455c692011-02-15 16:59:17 +00006143 /* Transfer changeable features to wanted_features and enable
6144 * software offloads (GSO and GRO).
6145 */
6146 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006147 dev->features |= NETIF_F_SOFT_FEATURES;
6148 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006149
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006150 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00006151 if (!(dev->flags & IFF_LOOPBACK)) {
6152 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6153 if (dev->features & NETIF_F_ALL_CSUM) {
6154 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
6155 dev->features |= NETIF_F_NOCACHE_COPY;
6156 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006157 }
6158
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006159 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006160 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006161 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006162
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006163 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6164 ret = notifier_to_errno(ret);
6165 if (ret)
6166 goto err_uninit;
6167
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006168 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006169 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006170 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006171 dev->reg_state = NETREG_REGISTERED;
6172
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006173 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006174
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175 /*
6176 * Default initial state at registry is that the
6177 * device is present.
6178 */
6179
6180 set_bit(__LINK_STATE_PRESENT, &dev->state);
6181
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006182 linkwatch_init_dev(dev);
6183
Linus Torvalds1da177e2005-04-16 15:20:36 -07006184 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006186 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006187 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006188
Jiri Pirko948b3372013-01-08 01:38:25 +00006189 /* If the device has permanent device address, driver should
6190 * set dev_addr and also addr_assign_type should be set to
6191 * NET_ADDR_PERM (default value).
6192 */
6193 if (dev->addr_assign_type == NET_ADDR_PERM)
6194 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6195
Linus Torvalds1da177e2005-04-16 15:20:36 -07006196 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006197 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006198 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006199 if (ret) {
6200 rollback_registered(dev);
6201 dev->reg_state = NETREG_UNREGISTERED;
6202 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006203 /*
6204 * Prevent userspace races by waiting until the network
6205 * device is fully setup before sending notifications.
6206 */
Patrick McHardya2835762010-02-26 06:34:51 +00006207 if (!dev->rtnl_link_ops ||
6208 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6209 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006210
6211out:
6212 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006213
6214err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006215 if (dev->netdev_ops->ndo_uninit)
6216 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006217 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006218}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006219EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220
6221/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006222 * init_dummy_netdev - init a dummy network device for NAPI
6223 * @dev: device to init
6224 *
6225 * This takes a network device structure and initialize the minimum
6226 * amount of fields so it can be used to schedule NAPI polls without
6227 * registering a full blown interface. This is to be used by drivers
6228 * that need to tie several hardware interfaces to a single NAPI
6229 * poll scheduler due to HW limitations.
6230 */
6231int init_dummy_netdev(struct net_device *dev)
6232{
6233 /* Clear everything. Note we don't initialize spinlocks
6234 * are they aren't supposed to be taken by any of the
6235 * NAPI code and this dummy netdev is supposed to be
6236 * only ever used for NAPI polls
6237 */
6238 memset(dev, 0, sizeof(struct net_device));
6239
6240 /* make sure we BUG if trying to hit standard
6241 * register/unregister code path
6242 */
6243 dev->reg_state = NETREG_DUMMY;
6244
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006245 /* NAPI wants this */
6246 INIT_LIST_HEAD(&dev->napi_list);
6247
6248 /* a dummy interface is started by default */
6249 set_bit(__LINK_STATE_PRESENT, &dev->state);
6250 set_bit(__LINK_STATE_START, &dev->state);
6251
Eric Dumazet29b44332010-10-11 10:22:12 +00006252 /* Note : We dont allocate pcpu_refcnt for dummy devices,
6253 * because users of this 'device' dont need to change
6254 * its refcount.
6255 */
6256
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006257 return 0;
6258}
6259EXPORT_SYMBOL_GPL(init_dummy_netdev);
6260
6261
6262/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006263 * register_netdev - register a network device
6264 * @dev: device to register
6265 *
6266 * Take a completed network device structure and add it to the kernel
6267 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6268 * chain. 0 is returned on success. A negative errno code is returned
6269 * on a failure to set up the device, or if the name is a duplicate.
6270 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006271 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006272 * and expands the device name if you passed a format string to
6273 * alloc_netdev.
6274 */
6275int register_netdev(struct net_device *dev)
6276{
6277 int err;
6278
6279 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006280 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281 rtnl_unlock();
6282 return err;
6283}
6284EXPORT_SYMBOL(register_netdev);
6285
Eric Dumazet29b44332010-10-11 10:22:12 +00006286int netdev_refcnt_read(const struct net_device *dev)
6287{
6288 int i, refcnt = 0;
6289
6290 for_each_possible_cpu(i)
6291 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6292 return refcnt;
6293}
6294EXPORT_SYMBOL(netdev_refcnt_read);
6295
Ben Hutchings2c530402012-07-10 10:55:09 +00006296/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006298 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299 *
6300 * This is called when unregistering network devices.
6301 *
6302 * Any protocol or device that holds a reference should register
6303 * for netdevice notification, and cleanup and put back the
6304 * reference if they receive an UNREGISTER event.
6305 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006306 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307 */
6308static void netdev_wait_allrefs(struct net_device *dev)
6309{
6310 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006311 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006312
Eric Dumazete014deb2009-11-17 05:59:21 +00006313 linkwatch_forget_dev(dev);
6314
Linus Torvalds1da177e2005-04-16 15:20:36 -07006315 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006316 refcnt = netdev_refcnt_read(dev);
6317
6318 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006319 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006320 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006321
6322 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006323 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006324
Eric Dumazet748e2d92012-08-22 21:50:59 +00006325 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006326 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006327 rtnl_lock();
6328
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006329 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006330 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6331 &dev->state)) {
6332 /* We must not have linkwatch events
6333 * pending on unregister. If this
6334 * happens, we simply run the queue
6335 * unscheduled, resulting in a noop
6336 * for this device.
6337 */
6338 linkwatch_run_queue();
6339 }
6340
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006341 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342
6343 rebroadcast_time = jiffies;
6344 }
6345
6346 msleep(250);
6347
Eric Dumazet29b44332010-10-11 10:22:12 +00006348 refcnt = netdev_refcnt_read(dev);
6349
Linus Torvalds1da177e2005-04-16 15:20:36 -07006350 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006351 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6352 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006353 warning_time = jiffies;
6354 }
6355 }
6356}
6357
6358/* The sequence is:
6359 *
6360 * rtnl_lock();
6361 * ...
6362 * register_netdevice(x1);
6363 * register_netdevice(x2);
6364 * ...
6365 * unregister_netdevice(y1);
6366 * unregister_netdevice(y2);
6367 * ...
6368 * rtnl_unlock();
6369 * free_netdev(y1);
6370 * free_netdev(y2);
6371 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006372 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006374 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006375 * without deadlocking with linkwatch via keventd.
6376 * 2) Since we run with the RTNL semaphore not held, we can sleep
6377 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006378 *
6379 * We must not return until all unregister events added during
6380 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006381 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382void netdev_run_todo(void)
6383{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006384 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006385
Linus Torvalds1da177e2005-04-16 15:20:36 -07006386 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006387 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006388
6389 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006390
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006391
6392 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006393 if (!list_empty(&list))
6394 rcu_barrier();
6395
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396 while (!list_empty(&list)) {
6397 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006398 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006399 list_del(&dev->todo_list);
6400
Eric Dumazet748e2d92012-08-22 21:50:59 +00006401 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006402 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006403 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006404
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006405 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006406 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006407 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006408 dump_stack();
6409 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006410 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006411
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006412 dev->reg_state = NETREG_UNREGISTERED;
6413
Changli Gao152102c2010-03-30 20:16:22 +00006414 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006415
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006416 netdev_wait_allrefs(dev);
6417
6418 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006419 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006420 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6421 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006422 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006423
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006424 if (dev->destructor)
6425 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006426
6427 /* Free network device */
6428 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006429 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006430}
6431
Ben Hutchings3cfde792010-07-09 09:11:52 +00006432/* Convert net_device_stats to rtnl_link_stats64. They have the same
6433 * fields in the same order, with only the type differing.
6434 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006435void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6436 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006437{
6438#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006439 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6440 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006441#else
6442 size_t i, n = sizeof(*stats64) / sizeof(u64);
6443 const unsigned long *src = (const unsigned long *)netdev_stats;
6444 u64 *dst = (u64 *)stats64;
6445
6446 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6447 sizeof(*stats64) / sizeof(u64));
6448 for (i = 0; i < n; i++)
6449 dst[i] = src[i];
6450#endif
6451}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006452EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00006453
Eric Dumazetd83345a2009-11-16 03:36:51 +00006454/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006455 * dev_get_stats - get network device statistics
6456 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006457 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006458 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006459 * Get network statistics from device. Return @storage.
6460 * The device driver may provide its own method by setting
6461 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6462 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006463 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006464struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6465 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006466{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006467 const struct net_device_ops *ops = dev->netdev_ops;
6468
Eric Dumazet28172732010-07-07 14:58:56 -07006469 if (ops->ndo_get_stats64) {
6470 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006471 ops->ndo_get_stats64(dev, storage);
6472 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006473 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006474 } else {
6475 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006476 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006477 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006478 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006479}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006480EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07006481
Eric Dumazet24824a02010-10-02 06:11:55 +00006482struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006483{
Eric Dumazet24824a02010-10-02 06:11:55 +00006484 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006485
Eric Dumazet24824a02010-10-02 06:11:55 +00006486#ifdef CONFIG_NET_CLS_ACT
6487 if (queue)
6488 return queue;
6489 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6490 if (!queue)
6491 return NULL;
6492 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006493 queue->qdisc = &noop_qdisc;
6494 queue->qdisc_sleeping = &noop_qdisc;
6495 rcu_assign_pointer(dev->ingress_queue, queue);
6496#endif
6497 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006498}
6499
Eric Dumazet2c60db02012-09-16 09:17:26 +00006500static const struct ethtool_ops default_ethtool_ops;
6501
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006503 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504 * @sizeof_priv: size of private data to allocate space for
6505 * @name: device name format string
6506 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006507 * @txqs: the number of TX subqueues to allocate
6508 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006509 *
6510 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006511 * and performs basic initialization. Also allocates subquue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006512 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006513 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006514struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6515 void (*setup)(struct net_device *),
6516 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006517{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006519 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006520 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006521
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006522 BUG_ON(strlen(name) >= sizeof(dev->name));
6523
Tom Herbert36909ea2011-01-09 19:36:31 +00006524 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006525 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006526 return NULL;
6527 }
6528
Tom Herbert36909ea2011-01-09 19:36:31 +00006529#ifdef CONFIG_RPS
6530 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006531 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006532 return NULL;
6533 }
6534#endif
6535
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006536 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006537 if (sizeof_priv) {
6538 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006539 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006540 alloc_size += sizeof_priv;
6541 }
6542 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006543 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006544
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07006545 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546 if (!p) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006547 pr_err("alloc_netdev: Unable to allocate device\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006548 return NULL;
6549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006550
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006551 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006552 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006553
Eric Dumazet29b44332010-10-11 10:22:12 +00006554 dev->pcpu_refcnt = alloc_percpu(int);
6555 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00006556 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006557
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006559 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006560
Jiri Pirko22bedad32010-04-01 21:22:57 +00006561 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006562 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006563
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006564 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006566 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006567 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006568
Herbert Xud565b0a2008-12-15 23:38:52 -08006569 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006570 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006571 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006572 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006573 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006575
6576 dev->num_tx_queues = txqs;
6577 dev->real_num_tx_queues = txqs;
6578 if (netif_alloc_netdev_queues(dev))
6579 goto free_all;
6580
6581#ifdef CONFIG_RPS
6582 dev->num_rx_queues = rxqs;
6583 dev->real_num_rx_queues = rxqs;
6584 if (netif_alloc_rx_queues(dev))
6585 goto free_all;
6586#endif
6587
Linus Torvalds1da177e2005-04-16 15:20:36 -07006588 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006589 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006590 if (!dev->ethtool_ops)
6591 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006593
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006594free_all:
6595 free_netdev(dev);
6596 return NULL;
6597
Eric Dumazet29b44332010-10-11 10:22:12 +00006598free_pcpu:
6599 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00006600 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006601#ifdef CONFIG_RPS
6602 kfree(dev->_rx);
6603#endif
6604
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006605free_p:
6606 kfree(p);
6607 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006608}
Tom Herbert36909ea2011-01-09 19:36:31 +00006609EXPORT_SYMBOL(alloc_netdev_mqs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610
6611/**
6612 * free_netdev - free network device
6613 * @dev: device
6614 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006615 * This function does the last stage of destroying an allocated device
6616 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006617 * If this is the last reference then it will be freed.
6618 */
6619void free_netdev(struct net_device *dev)
6620{
Herbert Xud565b0a2008-12-15 23:38:52 -08006621 struct napi_struct *p, *n;
6622
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006623 release_net(dev_net(dev));
6624
David S. Millere8a04642008-07-17 00:34:19 -07006625 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006626#ifdef CONFIG_RPS
6627 kfree(dev->_rx);
6628#endif
David S. Millere8a04642008-07-17 00:34:19 -07006629
Eric Dumazet33d480c2011-08-11 19:30:52 +00006630 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006631
Jiri Pirkof001fde2009-05-05 02:48:28 +00006632 /* Flush device addresses */
6633 dev_addr_flush(dev);
6634
Herbert Xud565b0a2008-12-15 23:38:52 -08006635 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6636 netif_napi_del(p);
6637
Eric Dumazet29b44332010-10-11 10:22:12 +00006638 free_percpu(dev->pcpu_refcnt);
6639 dev->pcpu_refcnt = NULL;
6640
Stephen Hemminger3041a062006-05-26 13:25:24 -07006641 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006642 if (dev->reg_state == NETREG_UNINITIALIZED) {
6643 kfree((char *)dev - dev->padded);
6644 return;
6645 }
6646
6647 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6648 dev->reg_state = NETREG_RELEASED;
6649
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006650 /* will free via device release */
6651 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006652}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006653EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006654
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006655/**
6656 * synchronize_net - Synchronize with packet receive processing
6657 *
6658 * Wait for packets currently being received to be done.
6659 * Does not block later packets from starting.
6660 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006661void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662{
6663 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006664 if (rtnl_is_locked())
6665 synchronize_rcu_expedited();
6666 else
6667 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006668}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006669EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006670
6671/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006672 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006673 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006674 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006675 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006676 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006677 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006678 * If head not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679 *
6680 * Callers must hold the rtnl semaphore. You may want
6681 * unregister_netdev() instead of this.
6682 */
6683
Eric Dumazet44a08732009-10-27 07:03:04 +00006684void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006685{
Herbert Xua6620712007-12-12 19:21:56 -08006686 ASSERT_RTNL();
6687
Eric Dumazet44a08732009-10-27 07:03:04 +00006688 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006689 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006690 } else {
6691 rollback_registered(dev);
6692 /* Finish processing unregister after unlock */
6693 net_set_todo(dev);
6694 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006695}
Eric Dumazet44a08732009-10-27 07:03:04 +00006696EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697
6698/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006699 * unregister_netdevice_many - unregister many devices
6700 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006701 */
6702void unregister_netdevice_many(struct list_head *head)
6703{
6704 struct net_device *dev;
6705
6706 if (!list_empty(head)) {
6707 rollback_registered_many(head);
6708 list_for_each_entry(dev, head, unreg_list)
6709 net_set_todo(dev);
6710 }
6711}
Eric Dumazet63c80992009-10-27 07:06:49 +00006712EXPORT_SYMBOL(unregister_netdevice_many);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006713
6714/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715 * unregister_netdev - remove device from the kernel
6716 * @dev: device
6717 *
6718 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006719 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006720 *
6721 * This is just a wrapper for unregister_netdevice that takes
6722 * the rtnl semaphore. In general you want to use this and not
6723 * unregister_netdevice.
6724 */
6725void unregister_netdev(struct net_device *dev)
6726{
6727 rtnl_lock();
6728 unregister_netdevice(dev);
6729 rtnl_unlock();
6730}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006731EXPORT_SYMBOL(unregister_netdev);
6732
Eric W. Biedermance286d32007-09-12 13:53:49 +02006733/**
6734 * dev_change_net_namespace - move device to different nethost namespace
6735 * @dev: device
6736 * @net: network namespace
6737 * @pat: If not NULL name pattern to try if the current device name
6738 * is already taken in the destination network namespace.
6739 *
6740 * This function shuts down a device interface and moves it
6741 * to a new network namespace. On success 0 is returned, on
6742 * a failure a netagive errno code is returned.
6743 *
6744 * Callers must hold the rtnl semaphore.
6745 */
6746
6747int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6748{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006749 int err;
6750
6751 ASSERT_RTNL();
6752
6753 /* Don't allow namespace local devices to be moved. */
6754 err = -EINVAL;
6755 if (dev->features & NETIF_F_NETNS_LOCAL)
6756 goto out;
6757
6758 /* Ensure the device has been registrered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006759 if (dev->reg_state != NETREG_REGISTERED)
6760 goto out;
6761
6762 /* Get out if there is nothing todo */
6763 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006764 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006765 goto out;
6766
6767 /* Pick the destination device name, and ensure
6768 * we can use it in the destination network namespace.
6769 */
6770 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006771 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006772 /* We get here if we can't use the current device name */
6773 if (!pat)
6774 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006775 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006776 goto out;
6777 }
6778
6779 /*
6780 * And now a mini version of register_netdevice unregister_netdevice.
6781 */
6782
6783 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006784 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006785
6786 /* And unlink it from device chain */
6787 err = -ENODEV;
6788 unlist_netdevice(dev);
6789
6790 synchronize_net();
6791
6792 /* Shutdown queueing discipline. */
6793 dev_shutdown(dev);
6794
6795 /* Notify protocols, that we are about to destroy
6796 this device. They should clean all the things.
David Lamparter3b27e102010-09-17 03:22:19 +00006797
6798 Note that dev->reg_state stays at NETREG_REGISTERED.
6799 This is wanted because this way 8021q and macvlan know
6800 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006801 */
6802 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006803 rcu_barrier();
6804 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00006805 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006806
6807 /*
6808 * Flush the unicast and multicast chains
6809 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006810 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006811 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006812
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006813 /* Send a netdev-removed uevent to the old namespace */
6814 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6815
Eric W. Biedermance286d32007-09-12 13:53:49 +02006816 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006817 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006818
Eric W. Biedermance286d32007-09-12 13:53:49 +02006819 /* If there is an ifindex conflict assign a new one */
6820 if (__dev_get_by_index(net, dev->ifindex)) {
6821 int iflink = (dev->iflink == dev->ifindex);
6822 dev->ifindex = dev_new_index(net);
6823 if (iflink)
6824 dev->iflink = dev->ifindex;
6825 }
6826
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006827 /* Send a netdev-add uevent to the new namespace */
6828 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6829
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006830 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006831 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006832 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006833
6834 /* Add the device back in the hashes */
6835 list_netdevice(dev);
6836
6837 /* Notify protocols, that a new device appeared. */
6838 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6839
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006840 /*
6841 * Prevent userspace races by waiting until the network
6842 * device is fully setup before sending notifications.
6843 */
6844 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6845
Eric W. Biedermance286d32007-09-12 13:53:49 +02006846 synchronize_net();
6847 err = 0;
6848out:
6849 return err;
6850}
Johannes Berg463d0182009-07-14 00:33:35 +02006851EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006852
Linus Torvalds1da177e2005-04-16 15:20:36 -07006853static int dev_cpu_callback(struct notifier_block *nfb,
6854 unsigned long action,
6855 void *ocpu)
6856{
6857 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006858 struct sk_buff *skb;
6859 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6860 struct softnet_data *sd, *oldsd;
6861
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006862 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863 return NOTIFY_OK;
6864
6865 local_irq_disable();
6866 cpu = smp_processor_id();
6867 sd = &per_cpu(softnet_data, cpu);
6868 oldsd = &per_cpu(softnet_data, oldcpu);
6869
6870 /* Find end of our completion_queue. */
6871 list_skb = &sd->completion_queue;
6872 while (*list_skb)
6873 list_skb = &(*list_skb)->next;
6874 /* Append completion queue from offline CPU. */
6875 *list_skb = oldsd->completion_queue;
6876 oldsd->completion_queue = NULL;
6877
Linus Torvalds1da177e2005-04-16 15:20:36 -07006878 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006879 if (oldsd->output_queue) {
6880 *sd->output_queue_tailp = oldsd->output_queue;
6881 sd->output_queue_tailp = oldsd->output_queue_tailp;
6882 oldsd->output_queue = NULL;
6883 oldsd->output_queue_tailp = &oldsd->output_queue;
6884 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006885 /* Append NAPI poll list from offline CPU. */
6886 if (!list_empty(&oldsd->poll_list)) {
6887 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6888 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6889 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006890
6891 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6892 local_irq_enable();
6893
6894 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006895 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6896 netif_rx(skb);
6897 input_queue_head_incr(oldsd);
6898 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006899 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006900 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006901 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006902 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006903
6904 return NOTIFY_OK;
6905}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006906
6907
Herbert Xu7f353bf2007-08-10 15:47:58 -07006908/**
Herbert Xub63365a2008-10-23 01:11:29 -07006909 * netdev_increment_features - increment feature set by one
6910 * @all: current feature set
6911 * @one: new feature set
6912 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006913 *
6914 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006915 * @one to the master device with current feature set @all. Will not
6916 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006917 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006918netdev_features_t netdev_increment_features(netdev_features_t all,
6919 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006920{
Michał Mirosław1742f182011-04-22 06:31:16 +00006921 if (mask & NETIF_F_GEN_CSUM)
6922 mask |= NETIF_F_ALL_CSUM;
6923 mask |= NETIF_F_VLAN_CHALLENGED;
6924
6925 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6926 all &= one | ~NETIF_F_ALL_FOR_ALL;
6927
Michał Mirosław1742f182011-04-22 06:31:16 +00006928 /* If one device supports hw checksumming, set for all. */
6929 if (all & NETIF_F_GEN_CSUM)
6930 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006931
6932 return all;
6933}
Herbert Xub63365a2008-10-23 01:11:29 -07006934EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006935
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006936static struct hlist_head *netdev_create_hash(void)
6937{
6938 int i;
6939 struct hlist_head *hash;
6940
6941 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6942 if (hash != NULL)
6943 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6944 INIT_HLIST_HEAD(&hash[i]);
6945
6946 return hash;
6947}
6948
Eric W. Biederman881d9662007-09-17 11:56:21 -07006949/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006950static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006951{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006952 if (net != &init_net)
6953 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006954
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006955 net->dev_name_head = netdev_create_hash();
6956 if (net->dev_name_head == NULL)
6957 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006958
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006959 net->dev_index_head = netdev_create_hash();
6960 if (net->dev_index_head == NULL)
6961 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006962
6963 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006964
6965err_idx:
6966 kfree(net->dev_name_head);
6967err_name:
6968 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006969}
6970
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006971/**
6972 * netdev_drivername - network driver for the device
6973 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006974 *
6975 * Determine network driver for device.
6976 */
David S. Miller3019de12011-06-06 16:41:33 -07006977const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006978{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006979 const struct device_driver *driver;
6980 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006981 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006982
6983 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006984 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006985 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006986
6987 driver = parent->driver;
6988 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006989 return driver->name;
6990 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006991}
6992
Joe Perchesb004ff42012-09-12 20:12:19 -07006993static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006994 struct va_format *vaf)
6995{
6996 int r;
6997
Joe Perchesb004ff42012-09-12 20:12:19 -07006998 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006999 r = dev_printk_emit(level[1] - '0',
7000 dev->dev.parent,
7001 "%s %s %s: %pV",
7002 dev_driver_string(dev->dev.parent),
7003 dev_name(dev->dev.parent),
7004 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007005 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00007006 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007007 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00007008 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007009 }
Joe Perches256df2f2010-06-27 01:02:35 +00007010
7011 return r;
7012}
7013
7014int netdev_printk(const char *level, const struct net_device *dev,
7015 const char *format, ...)
7016{
7017 struct va_format vaf;
7018 va_list args;
7019 int r;
7020
7021 va_start(args, format);
7022
7023 vaf.fmt = format;
7024 vaf.va = &args;
7025
7026 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007027
Joe Perches256df2f2010-06-27 01:02:35 +00007028 va_end(args);
7029
7030 return r;
7031}
7032EXPORT_SYMBOL(netdev_printk);
7033
7034#define define_netdev_printk_level(func, level) \
7035int func(const struct net_device *dev, const char *fmt, ...) \
7036{ \
7037 int r; \
7038 struct va_format vaf; \
7039 va_list args; \
7040 \
7041 va_start(args, fmt); \
7042 \
7043 vaf.fmt = fmt; \
7044 vaf.va = &args; \
7045 \
7046 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007047 \
Joe Perches256df2f2010-06-27 01:02:35 +00007048 va_end(args); \
7049 \
7050 return r; \
7051} \
7052EXPORT_SYMBOL(func);
7053
7054define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7055define_netdev_printk_level(netdev_alert, KERN_ALERT);
7056define_netdev_printk_level(netdev_crit, KERN_CRIT);
7057define_netdev_printk_level(netdev_err, KERN_ERR);
7058define_netdev_printk_level(netdev_warn, KERN_WARNING);
7059define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7060define_netdev_printk_level(netdev_info, KERN_INFO);
7061
Pavel Emelyanov46650792007-10-08 20:38:39 -07007062static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007063{
7064 kfree(net->dev_name_head);
7065 kfree(net->dev_index_head);
7066}
7067
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007068static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007069 .init = netdev_init,
7070 .exit = netdev_exit,
7071};
7072
Pavel Emelyanov46650792007-10-08 20:38:39 -07007073static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007074{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007075 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007076 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007077 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007078 * initial network namespace
7079 */
7080 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007081 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007082 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007083 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007084
7085 /* Ignore unmoveable devices (i.e. loopback) */
7086 if (dev->features & NETIF_F_NETNS_LOCAL)
7087 continue;
7088
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007089 /* Leave virtual devices for the generic cleanup */
7090 if (dev->rtnl_link_ops)
7091 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007092
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007093 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007094 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7095 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007096 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007097 pr_emerg("%s: failed to move %s to init_net: %d\n",
7098 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007099 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007100 }
7101 }
7102 rtnl_unlock();
7103}
7104
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007105static void __net_exit default_device_exit_batch(struct list_head *net_list)
7106{
7107 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007108 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007109 * Do this across as many network namespaces as possible to
7110 * improve batching efficiency.
7111 */
7112 struct net_device *dev;
7113 struct net *net;
7114 LIST_HEAD(dev_kill_list);
7115
7116 rtnl_lock();
7117 list_for_each_entry(net, net_list, exit_list) {
7118 for_each_netdev_reverse(net, dev) {
7119 if (dev->rtnl_link_ops)
7120 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7121 else
7122 unregister_netdevice_queue(dev, &dev_kill_list);
7123 }
7124 }
7125 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007126 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007127 rtnl_unlock();
7128}
7129
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007130static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007131 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007132 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007133};
7134
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135/*
7136 * Initialize the DEV module. At boot time this walks the device list and
7137 * unhooks any devices that fail to initialise (normally hardware not
7138 * present) and leaves us with a valid list of present and active devices.
7139 *
7140 */
7141
7142/*
7143 * This is called single threaded during boot, so no need
7144 * to take the rtnl semaphore.
7145 */
7146static int __init net_dev_init(void)
7147{
7148 int i, rc = -ENOMEM;
7149
7150 BUG_ON(!dev_boot_phase);
7151
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152 if (dev_proc_init())
7153 goto out;
7154
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007155 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156 goto out;
7157
7158 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007159 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160 INIT_LIST_HEAD(&ptype_base[i]);
7161
Vlad Yasevich62532da2012-11-15 08:49:10 +00007162 INIT_LIST_HEAD(&offload_base);
7163
Eric W. Biederman881d9662007-09-17 11:56:21 -07007164 if (register_pernet_subsys(&netdev_net_ops))
7165 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007166
7167 /*
7168 * Initialise the packet receive queues.
7169 */
7170
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007171 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007172 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007173
Changli Gaodee42872010-05-02 05:42:16 +00007174 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007175 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007176 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007177 sd->completion_queue = NULL;
7178 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007179 sd->output_queue = NULL;
7180 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007181#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007182 sd->csd.func = rps_trigger_softirq;
7183 sd->csd.info = sd;
7184 sd->csd.flags = 0;
7185 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007186#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007187
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007188 sd->backlog.poll = process_backlog;
7189 sd->backlog.weight = weight_p;
7190 sd->backlog.gro_list = NULL;
7191 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007192 }
7193
Linus Torvalds1da177e2005-04-16 15:20:36 -07007194 dev_boot_phase = 0;
7195
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007196 /* The loopback device is special if any other network devices
7197 * is present in a network namespace the loopback device must
7198 * be present. Since we now dynamically allocate and free the
7199 * loopback device ensure this invariant is maintained by
7200 * keeping the loopback device as the first device on the
7201 * list of network devices. Ensuring the loopback devices
7202 * is the first device that appears and the last network device
7203 * that disappears.
7204 */
7205 if (register_pernet_device(&loopback_net_ops))
7206 goto out;
7207
7208 if (register_pernet_device(&default_device_ops))
7209 goto out;
7210
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007211 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7212 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213
7214 hotcpu_notifier(dev_cpu_callback, 0);
7215 dst_init();
7216 dev_mcast_init();
7217 rc = 0;
7218out:
7219 return rc;
7220}
7221
7222subsys_initcall(net_dev_init);
7223
Krishna Kumare88721f2009-02-18 17:55:02 -08007224static int __init initialize_hashrnd(void)
7225{
Tom Herbert0a9627f2010-03-16 08:03:29 +00007226 get_random_bytes(&hashrnd, sizeof(hashrnd));
Krishna Kumare88721f2009-02-18 17:55:02 -08007227 return 0;
7228}
7229
7230late_initcall_sync(initialize_hashrnd);
7231