blob: 231de8738149ba096b428433325a9eac871a5574 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
Jesper Juhl02c30a82005-05-05 16:16:16 -070010 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75#include <asm/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070076#include <linux/bitops.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080077#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070078#include <linux/cpu.h>
79#include <linux/types.h>
80#include <linux/kernel.h>
stephen hemminger08e98972009-11-10 07:20:34 +000081#include <linux/hash.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090082#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#include <linux/sched.h>
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -080084#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include <linux/string.h>
86#include <linux/mm.h>
87#include <linux/socket.h>
88#include <linux/sockios.h>
89#include <linux/errno.h>
90#include <linux/interrupt.h>
91#include <linux/if_ether.h>
92#include <linux/netdevice.h>
93#include <linux/etherdevice.h>
Ben Hutchings0187bdf2008-06-19 16:15:47 -070094#include <linux/ethtool.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#include <linux/notifier.h>
96#include <linux/skbuff.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020097#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070098#include <net/sock.h>
99#include <linux/rtnetlink.h>
100#include <linux/proc_fs.h>
101#include <linux/seq_file.h>
102#include <linux/stat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103#include <net/dst.h>
104#include <net/pkt_sched.h>
105#include <net/checksum.h>
Arnd Bergmann44540962009-11-26 06:07:08 +0000106#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107#include <linux/highmem.h>
108#include <linux/init.h>
109#include <linux/kmod.h>
110#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111#include <linux/netpoll.h>
112#include <linux/rcupdate.h>
113#include <linux/delay.h>
Johannes Berg295f4a12007-04-26 20:43:56 -0700114#include <net/wext.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <net/iw_handler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116#include <asm/current.h>
Steve Grubb5bdb9882005-12-03 08:39:35 -0500117#include <linux/audit.h>
Chris Leechdb217332006-06-17 21:24:58 -0700118#include <linux/dmaengine.h>
Herbert Xuf6a78bf2006-06-22 02:57:17 -0700119#include <linux/err.h>
David S. Millerc7fa9d12006-08-15 16:34:13 -0700120#include <linux/ctype.h>
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700121#include <linux/if_arp.h>
Ben Hutchings6de329e2008-06-16 17:02:28 -0700122#include <linux/if_vlan.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700123#include <linux/ip.h>
Alexander Duyckad55dca2008-09-20 22:05:50 -0700124#include <net/ip.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700125#include <linux/ipv6.h>
126#include <linux/in.h>
David S. Millerb6b2fed2008-07-21 09:48:06 -0700127#include <linux/jhash.h>
128#include <linux/random.h>
David S. Miller9cbc1cb2009-06-15 03:02:23 -0700129#include <trace/events/napi.h>
Koki Sanagicf66ba52010-08-23 18:45:02 +0900130#include <trace/events/net.h>
Koki Sanagi07dc22e2010-08-23 18:46:12 +0900131#include <trace/events/skb.h>
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +0000132#include <linux/pci.h>
Stephen Rothwellcaeda9b2010-09-16 21:39:16 -0700133#include <linux/inetdevice.h>
Ben Hutchingsc4454772011-01-19 11:03:53 +0000134#include <linux/cpu_rmap.h>
Richard Cochran4dc360c2011-10-19 17:00:35 -0400135#include <linux/net_tstamp.h>
Ingo Molnarc5905af2012-02-24 08:31:31 +0100136#include <linux/static_key.h>
Eric Dumazet4504b862011-11-28 05:23:23 +0000137#include <net/flow_keys.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138
Pavel Emelyanov342709e2007-10-23 21:14:45 -0700139#include "net-sysfs.h"
140
Herbert Xud565b0a2008-12-15 23:38:52 -0800141/* Instead of increasing this, you should create a hash table. */
142#define MAX_GRO_SKBS 8
143
Herbert Xu5d38a072009-01-04 16:13:40 -0800144/* This should be increased if a protocol with a bigger head is added. */
145#define GRO_MAX_HEAD (MAX_HEADER + 128)
146
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147/*
148 * The list of packet types we will receive (as opposed to discard)
149 * and the routines to invoke.
150 *
151 * Why 16. Because with 16 the only overlap we get on a hash of the
152 * low nibble of the protocol value is RARP/SNAP/X.25.
153 *
154 * NOTE: That is no longer true with the addition of VLAN tags. Not
155 * sure which should go first, but I bet it won't make much
156 * difference if we are running VLANs. The good news is that
157 * this protocol won't be in the list unless compiled in, so
Stephen Hemminger3041a062006-05-26 13:25:24 -0700158 * the average user (w/out VLANs) will not be adversely affected.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 * --BLG
160 *
161 * 0800 IP
162 * 8100 802.1Q VLAN
163 * 0001 802.3
164 * 0002 AX.25
165 * 0004 802.2
166 * 8035 RARP
167 * 0005 SNAP
168 * 0805 X.25
169 * 0806 ARP
170 * 8137 IPX
171 * 0009 Localtalk
172 * 86DD IPv6
173 */
174
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800175#define PTYPE_HASH_SIZE (16)
176#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178static DEFINE_SPINLOCK(ptype_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000179static DEFINE_SPINLOCK(offload_lock);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800180static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -0700181static struct list_head ptype_all __read_mostly; /* Taps */
Vlad Yasevich62532da2012-11-15 08:49:10 +0000182static struct list_head offload_base __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184/*
Pavel Emelianov7562f872007-05-03 15:13:45 -0700185 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 * semaphore.
187 *
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800188 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 *
190 * Writers must hold the rtnl semaphore while they loop through the
Pavel Emelianov7562f872007-05-03 15:13:45 -0700191 * dev_base_head list, and hold dev_base_lock for writing when they do the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 * actual updates. This allows pure readers to access the list even
193 * while a writer is preparing to update it.
194 *
195 * To put it another way, dev_base_lock is held for writing only to
196 * protect against pure readers; the rtnl semaphore provides the
197 * protection against other writers.
198 *
199 * See, for example usages, register_netdevice() and
200 * unregister_netdevice(), which must be called with the rtnl
201 * semaphore held.
202 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203DEFINE_RWLOCK(dev_base_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204EXPORT_SYMBOL(dev_base_lock);
205
Eric Dumazet30e6c9f2012-12-20 17:25:08 +0000206seqcount_t devnet_rename_seq;
Brian Haleyc91f6df2012-11-26 05:21:08 +0000207
Thomas Graf4e985ad2011-06-21 03:11:20 +0000208static inline void dev_base_seq_inc(struct net *net)
209{
210 while (++net->dev_base_seq == 0);
211}
212
Eric W. Biederman881d9662007-09-17 11:56:21 -0700213static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214{
Eric Dumazet95c96172012-04-15 05:58:06 +0000215 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
216
stephen hemminger08e98972009-11-10 07:20:34 +0000217 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218}
219
Eric W. Biederman881d9662007-09-17 11:56:21 -0700220static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221{
Eric Dumazet7c28bd02009-10-24 06:13:17 -0700222 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223}
224
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000225static inline void rps_lock(struct softnet_data *sd)
Changli Gao152102c2010-03-30 20:16:22 +0000226{
227#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000228 spin_lock(&sd->input_pkt_queue.lock);
Changli Gao152102c2010-03-30 20:16:22 +0000229#endif
230}
231
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000232static inline void rps_unlock(struct softnet_data *sd)
Changli Gao152102c2010-03-30 20:16:22 +0000233{
234#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000235 spin_unlock(&sd->input_pkt_queue.lock);
Changli Gao152102c2010-03-30 20:16:22 +0000236#endif
237}
238
Eric W. Biedermance286d32007-09-12 13:53:49 +0200239/* Device list insertion */
240static int list_netdevice(struct net_device *dev)
241{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900242 struct net *net = dev_net(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200243
244 ASSERT_RTNL();
245
246 write_lock_bh(&dev_base_lock);
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800247 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
Eric Dumazet72c95282009-10-30 07:11:27 +0000248 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000249 hlist_add_head_rcu(&dev->index_hlist,
250 dev_index_hash(net, dev->ifindex));
Eric W. Biedermance286d32007-09-12 13:53:49 +0200251 write_unlock_bh(&dev_base_lock);
Thomas Graf4e985ad2011-06-21 03:11:20 +0000252
253 dev_base_seq_inc(net);
254
Eric W. Biedermance286d32007-09-12 13:53:49 +0200255 return 0;
256}
257
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000258/* Device list removal
259 * caller must respect a RCU grace period before freeing/reusing dev
260 */
Eric W. Biedermance286d32007-09-12 13:53:49 +0200261static void unlist_netdevice(struct net_device *dev)
262{
263 ASSERT_RTNL();
264
265 /* Unlink dev from the device chain */
266 write_lock_bh(&dev_base_lock);
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800267 list_del_rcu(&dev->dev_list);
Eric Dumazet72c95282009-10-30 07:11:27 +0000268 hlist_del_rcu(&dev->name_hlist);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000269 hlist_del_rcu(&dev->index_hlist);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200270 write_unlock_bh(&dev_base_lock);
Thomas Graf4e985ad2011-06-21 03:11:20 +0000271
272 dev_base_seq_inc(dev_net(dev));
Eric W. Biedermance286d32007-09-12 13:53:49 +0200273}
274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275/*
276 * Our notifier list
277 */
278
Alan Sternf07d5b92006-05-09 15:23:03 -0700279static RAW_NOTIFIER_HEAD(netdev_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280
281/*
282 * Device drivers call our routines to queue packets here. We empty the
283 * queue in the local softnet handler.
284 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700285
Eric Dumazet9958da02010-04-17 04:17:02 +0000286DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700287EXPORT_PER_CPU_SYMBOL(softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288
David S. Millercf508b12008-07-22 14:16:42 -0700289#ifdef CONFIG_LOCKDEP
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700290/*
David S. Millerc773e842008-07-08 23:13:53 -0700291 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700292 * according to dev->type
293 */
294static const unsigned short netdev_lock_type[] =
295 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
296 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
297 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
298 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
299 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
300 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
301 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
302 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
303 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
304 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
305 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
306 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
Paul Gortmaker211ed862012-05-10 17:14:35 -0400307 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
308 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
309 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700310
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700311static const char *const netdev_lock_name[] =
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700312 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
313 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
314 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
315 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
316 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
317 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
318 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
319 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
320 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
321 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
322 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
323 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
Paul Gortmaker211ed862012-05-10 17:14:35 -0400324 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
325 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
326 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700327
328static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
David S. Millercf508b12008-07-22 14:16:42 -0700329static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700330
331static inline unsigned short netdev_lock_pos(unsigned short dev_type)
332{
333 int i;
334
335 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
336 if (netdev_lock_type[i] == dev_type)
337 return i;
338 /* the last key is used by default */
339 return ARRAY_SIZE(netdev_lock_type) - 1;
340}
341
David S. Millercf508b12008-07-22 14:16:42 -0700342static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
343 unsigned short dev_type)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700344{
345 int i;
346
347 i = netdev_lock_pos(dev_type);
348 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
349 netdev_lock_name[i]);
350}
David S. Millercf508b12008-07-22 14:16:42 -0700351
352static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
353{
354 int i;
355
356 i = netdev_lock_pos(dev->type);
357 lockdep_set_class_and_name(&dev->addr_list_lock,
358 &netdev_addr_lock_key[i],
359 netdev_lock_name[i]);
360}
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700361#else
David S. Millercf508b12008-07-22 14:16:42 -0700362static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
363 unsigned short dev_type)
364{
365}
366static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700367{
368}
369#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370
371/*******************************************************************************
372
373 Protocol management and registration routines
374
375*******************************************************************************/
376
377/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 * Add a protocol ID to the list. Now that the input handler is
379 * smarter we can dispense with all the messy stuff that used to be
380 * here.
381 *
382 * BEWARE!!! Protocol handlers, mangling input packets,
383 * MUST BE last in hash buckets and checking protocol handlers
384 * MUST start from promiscuous ptype_all chain in net_bh.
385 * It is true now, do not change it.
386 * Explanation follows: if protocol handler, mangling packet, will
387 * be the first on list, it is not able to sense, that packet
388 * is cloned and should be copied-on-write, so that it will
389 * change it and subsequent readers will get broken packet.
390 * --ANK (980803)
391 */
392
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000393static inline struct list_head *ptype_head(const struct packet_type *pt)
394{
395 if (pt->type == htons(ETH_P_ALL))
396 return &ptype_all;
397 else
398 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
399}
400
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401/**
402 * dev_add_pack - add packet handler
403 * @pt: packet type declaration
404 *
405 * Add a protocol handler to the networking stack. The passed &packet_type
406 * is linked into kernel lists and may not be freed until it has been
407 * removed from the kernel lists.
408 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900409 * This call does not sleep therefore it can not
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 * guarantee all CPU's that are in middle of receiving packets
411 * will see the new packet type (until the next received packet).
412 */
413
414void dev_add_pack(struct packet_type *pt)
415{
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000416 struct list_head *head = ptype_head(pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000418 spin_lock(&ptype_lock);
419 list_add_rcu(&pt->list, head);
420 spin_unlock(&ptype_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700422EXPORT_SYMBOL(dev_add_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424/**
425 * __dev_remove_pack - remove packet handler
426 * @pt: packet type declaration
427 *
428 * Remove a protocol handler that was previously added to the kernel
429 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
430 * from the kernel lists and can be freed or reused once this function
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900431 * returns.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 *
433 * The packet type might still be in use by receivers
434 * and must not be freed until after all the CPU's have gone
435 * through a quiescent state.
436 */
437void __dev_remove_pack(struct packet_type *pt)
438{
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000439 struct list_head *head = ptype_head(pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700440 struct packet_type *pt1;
441
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000442 spin_lock(&ptype_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443
444 list_for_each_entry(pt1, head, list) {
445 if (pt == pt1) {
446 list_del_rcu(&pt->list);
447 goto out;
448 }
449 }
450
Joe Perches7b6cd1c2012-02-01 10:54:43 +0000451 pr_warn("dev_remove_pack: %p not found\n", pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452out:
Eric Dumazetc07b68e2010-09-02 03:53:46 +0000453 spin_unlock(&ptype_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700455EXPORT_SYMBOL(__dev_remove_pack);
456
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457/**
458 * dev_remove_pack - remove packet handler
459 * @pt: packet type declaration
460 *
461 * Remove a protocol handler that was previously added to the kernel
462 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
463 * from the kernel lists and can be freed or reused once this function
464 * returns.
465 *
466 * This call sleeps to guarantee that no CPU is looking at the packet
467 * type after return.
468 */
469void dev_remove_pack(struct packet_type *pt)
470{
471 __dev_remove_pack(pt);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900472
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 synchronize_net();
474}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700475EXPORT_SYMBOL(dev_remove_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476
Vlad Yasevich62532da2012-11-15 08:49:10 +0000477
478/**
479 * dev_add_offload - register offload handlers
480 * @po: protocol offload declaration
481 *
482 * Add protocol offload handlers to the networking stack. The passed
483 * &proto_offload is linked into kernel lists and may not be freed until
484 * it has been removed from the kernel lists.
485 *
486 * This call does not sleep therefore it can not
487 * guarantee all CPU's that are in middle of receiving packets
488 * will see the new offload handlers (until the next received packet).
489 */
490void dev_add_offload(struct packet_offload *po)
491{
492 struct list_head *head = &offload_base;
493
494 spin_lock(&offload_lock);
495 list_add_rcu(&po->list, head);
496 spin_unlock(&offload_lock);
497}
498EXPORT_SYMBOL(dev_add_offload);
499
500/**
501 * __dev_remove_offload - remove offload handler
502 * @po: packet offload declaration
503 *
504 * Remove a protocol offload handler that was previously added to the
505 * kernel offload handlers by dev_add_offload(). The passed &offload_type
506 * is removed from the kernel lists and can be freed or reused once this
507 * function returns.
508 *
509 * The packet type might still be in use by receivers
510 * and must not be freed until after all the CPU's have gone
511 * through a quiescent state.
512 */
513void __dev_remove_offload(struct packet_offload *po)
514{
515 struct list_head *head = &offload_base;
516 struct packet_offload *po1;
517
Eric Dumazetc53aa502012-11-16 08:08:23 +0000518 spin_lock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000519
520 list_for_each_entry(po1, head, list) {
521 if (po == po1) {
522 list_del_rcu(&po->list);
523 goto out;
524 }
525 }
526
527 pr_warn("dev_remove_offload: %p not found\n", po);
528out:
Eric Dumazetc53aa502012-11-16 08:08:23 +0000529 spin_unlock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000530}
531EXPORT_SYMBOL(__dev_remove_offload);
532
533/**
534 * dev_remove_offload - remove packet offload handler
535 * @po: packet offload declaration
536 *
537 * Remove a packet offload handler that was previously added to the kernel
538 * offload handlers by dev_add_offload(). The passed &offload_type is
539 * removed from the kernel lists and can be freed or reused once this
540 * function returns.
541 *
542 * This call sleeps to guarantee that no CPU is looking at the packet
543 * type after return.
544 */
545void dev_remove_offload(struct packet_offload *po)
546{
547 __dev_remove_offload(po);
548
549 synchronize_net();
550}
551EXPORT_SYMBOL(dev_remove_offload);
552
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553/******************************************************************************
554
555 Device Boot-time Settings Routines
556
557*******************************************************************************/
558
559/* Boot time configuration table */
560static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
561
562/**
563 * netdev_boot_setup_add - add new setup entry
564 * @name: name of the device
565 * @map: configured settings for the device
566 *
567 * Adds new setup entry to the dev_boot_setup list. The function
568 * returns 0 on error and 1 on success. This is a generic routine to
569 * all netdevices.
570 */
571static int netdev_boot_setup_add(char *name, struct ifmap *map)
572{
573 struct netdev_boot_setup *s;
574 int i;
575
576 s = dev_boot_setup;
577 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
578 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
579 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700580 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 memcpy(&s[i].map, map, sizeof(s[i].map));
582 break;
583 }
584 }
585
586 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
587}
588
589/**
590 * netdev_boot_setup_check - check boot time settings
591 * @dev: the netdevice
592 *
593 * Check boot time settings for the device.
594 * The found settings are set for the device to be used
595 * later in the device probing.
596 * Returns 0 if no settings found, 1 if they are.
597 */
598int netdev_boot_setup_check(struct net_device *dev)
599{
600 struct netdev_boot_setup *s = dev_boot_setup;
601 int i;
602
603 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
604 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700605 !strcmp(dev->name, s[i].name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 dev->irq = s[i].map.irq;
607 dev->base_addr = s[i].map.base_addr;
608 dev->mem_start = s[i].map.mem_start;
609 dev->mem_end = s[i].map.mem_end;
610 return 1;
611 }
612 }
613 return 0;
614}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700615EXPORT_SYMBOL(netdev_boot_setup_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616
617
618/**
619 * netdev_boot_base - get address from boot time settings
620 * @prefix: prefix for network device
621 * @unit: id for network device
622 *
623 * Check boot time settings for the base address of device.
624 * The found settings are set for the device to be used
625 * later in the device probing.
626 * Returns 0 if no settings found.
627 */
628unsigned long netdev_boot_base(const char *prefix, int unit)
629{
630 const struct netdev_boot_setup *s = dev_boot_setup;
631 char name[IFNAMSIZ];
632 int i;
633
634 sprintf(name, "%s%d", prefix, unit);
635
636 /*
637 * If device already registered then return base of 1
638 * to indicate not to probe for this interface
639 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700640 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 return 1;
642
643 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
644 if (!strcmp(name, s[i].name))
645 return s[i].map.base_addr;
646 return 0;
647}
648
649/*
650 * Saves at boot time configured settings for any netdevice.
651 */
652int __init netdev_boot_setup(char *str)
653{
654 int ints[5];
655 struct ifmap map;
656
657 str = get_options(str, ARRAY_SIZE(ints), ints);
658 if (!str || !*str)
659 return 0;
660
661 /* Save settings */
662 memset(&map, 0, sizeof(map));
663 if (ints[0] > 0)
664 map.irq = ints[1];
665 if (ints[0] > 1)
666 map.base_addr = ints[2];
667 if (ints[0] > 2)
668 map.mem_start = ints[3];
669 if (ints[0] > 3)
670 map.mem_end = ints[4];
671
672 /* Add new entry to the list */
673 return netdev_boot_setup_add(str, &map);
674}
675
676__setup("netdev=", netdev_boot_setup);
677
678/*******************************************************************************
679
680 Device Interface Subroutines
681
682*******************************************************************************/
683
684/**
685 * __dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700686 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 * @name: name to find
688 *
689 * Find an interface by name. Must be called under RTNL semaphore
690 * or @dev_base_lock. If the name is found a pointer to the device
691 * is returned. If the name is not found then %NULL is returned. The
692 * reference counters are not incremented so the caller must be
693 * careful with locks.
694 */
695
Eric W. Biederman881d9662007-09-17 11:56:21 -0700696struct net_device *__dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697{
698 struct hlist_node *p;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700699 struct net_device *dev;
700 struct hlist_head *head = dev_name_hash(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700702 hlist_for_each_entry(dev, p, head, name_hlist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 if (!strncmp(dev->name, name, IFNAMSIZ))
704 return dev;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700705
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 return NULL;
707}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700708EXPORT_SYMBOL(__dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709
710/**
Eric Dumazet72c95282009-10-30 07:11:27 +0000711 * dev_get_by_name_rcu - find a device by its name
712 * @net: the applicable net namespace
713 * @name: name to find
714 *
715 * Find an interface by name.
716 * If the name is found a pointer to the device is returned.
717 * If the name is not found then %NULL is returned.
718 * The reference counters are not incremented so the caller must be
719 * careful with locks. The caller must hold RCU lock.
720 */
721
722struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
723{
724 struct hlist_node *p;
725 struct net_device *dev;
726 struct hlist_head *head = dev_name_hash(net, name);
727
728 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
729 if (!strncmp(dev->name, name, IFNAMSIZ))
730 return dev;
731
732 return NULL;
733}
734EXPORT_SYMBOL(dev_get_by_name_rcu);
735
736/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 * dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700738 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 * @name: name to find
740 *
741 * Find an interface by name. This can be called from any
742 * context and does its own locking. The returned handle has
743 * the usage count incremented and the caller must use dev_put() to
744 * release it when it is no longer needed. %NULL is returned if no
745 * matching device is found.
746 */
747
Eric W. Biederman881d9662007-09-17 11:56:21 -0700748struct net_device *dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749{
750 struct net_device *dev;
751
Eric Dumazet72c95282009-10-30 07:11:27 +0000752 rcu_read_lock();
753 dev = dev_get_by_name_rcu(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 if (dev)
755 dev_hold(dev);
Eric Dumazet72c95282009-10-30 07:11:27 +0000756 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 return dev;
758}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700759EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760
761/**
762 * __dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700763 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 * @ifindex: index of device
765 *
766 * Search for an interface by index. Returns %NULL if the device
767 * is not found or a pointer to the device. The device has not
768 * had its reference counter increased so the caller must be careful
769 * about locking. The caller must hold either the RTNL semaphore
770 * or @dev_base_lock.
771 */
772
Eric W. Biederman881d9662007-09-17 11:56:21 -0700773struct net_device *__dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774{
775 struct hlist_node *p;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700776 struct net_device *dev;
777 struct hlist_head *head = dev_index_hash(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700779 hlist_for_each_entry(dev, p, head, index_hlist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780 if (dev->ifindex == ifindex)
781 return dev;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700782
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 return NULL;
784}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700785EXPORT_SYMBOL(__dev_get_by_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000787/**
788 * dev_get_by_index_rcu - find a device by its ifindex
789 * @net: the applicable net namespace
790 * @ifindex: index of device
791 *
792 * Search for an interface by index. Returns %NULL if the device
793 * is not found or a pointer to the device. The device has not
794 * had its reference counter increased so the caller must be careful
795 * about locking. The caller must hold RCU lock.
796 */
797
798struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
799{
800 struct hlist_node *p;
801 struct net_device *dev;
802 struct hlist_head *head = dev_index_hash(net, ifindex);
803
804 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
805 if (dev->ifindex == ifindex)
806 return dev;
807
808 return NULL;
809}
810EXPORT_SYMBOL(dev_get_by_index_rcu);
811
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812
813/**
814 * dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700815 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 * @ifindex: index of device
817 *
818 * Search for an interface by index. Returns NULL if the device
819 * is not found or a pointer to the device. The device returned has
820 * had a reference added and the pointer is safe until the user calls
821 * dev_put to indicate they have finished with it.
822 */
823
Eric W. Biederman881d9662007-09-17 11:56:21 -0700824struct net_device *dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825{
826 struct net_device *dev;
827
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000828 rcu_read_lock();
829 dev = dev_get_by_index_rcu(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 if (dev)
831 dev_hold(dev);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000832 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 return dev;
834}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700835EXPORT_SYMBOL(dev_get_by_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836
837/**
Eric Dumazet941666c2010-12-05 01:23:53 +0000838 * dev_getbyhwaddr_rcu - find a device by its hardware address
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700839 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 * @type: media type of device
841 * @ha: hardware address
842 *
843 * Search for an interface by MAC address. Returns NULL if the device
Eric Dumazetc5066532011-01-24 13:16:16 -0800844 * is not found or a pointer to the device.
845 * The caller must hold RCU or RTNL.
Eric Dumazet941666c2010-12-05 01:23:53 +0000846 * The returned device has not had its ref count increased
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 * and the caller must therefore be careful about locking
848 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 */
850
Eric Dumazet941666c2010-12-05 01:23:53 +0000851struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
852 const char *ha)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853{
854 struct net_device *dev;
855
Eric Dumazet941666c2010-12-05 01:23:53 +0000856 for_each_netdev_rcu(net, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 if (dev->type == type &&
858 !memcmp(dev->dev_addr, ha, dev->addr_len))
Pavel Emelianov7562f872007-05-03 15:13:45 -0700859 return dev;
860
861 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862}
Eric Dumazet941666c2010-12-05 01:23:53 +0000863EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
Jochen Friedrichcf309e32005-09-22 04:44:55 -0300864
Eric W. Biederman881d9662007-09-17 11:56:21 -0700865struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700866{
867 struct net_device *dev;
868
869 ASSERT_RTNL();
Eric W. Biederman881d9662007-09-17 11:56:21 -0700870 for_each_netdev(net, dev)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700871 if (dev->type == type)
Pavel Emelianov7562f872007-05-03 15:13:45 -0700872 return dev;
873
874 return NULL;
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700875}
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700876EXPORT_SYMBOL(__dev_getfirstbyhwtype);
877
Eric W. Biederman881d9662007-09-17 11:56:21 -0700878struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879{
Eric Dumazet99fe3c32010-03-18 11:27:25 +0000880 struct net_device *dev, *ret = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881
Eric Dumazet99fe3c32010-03-18 11:27:25 +0000882 rcu_read_lock();
883 for_each_netdev_rcu(net, dev)
884 if (dev->type == type) {
885 dev_hold(dev);
886 ret = dev;
887 break;
888 }
889 rcu_read_unlock();
890 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892EXPORT_SYMBOL(dev_getfirstbyhwtype);
893
894/**
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000895 * dev_get_by_flags_rcu - find any device with given flags
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700896 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 * @if_flags: IFF_* values
898 * @mask: bitmask of bits in if_flags to check
899 *
900 * Search for any interface with the given flags. Returns NULL if a device
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000901 * is not found or a pointer to the device. Must be called inside
902 * rcu_read_lock(), and result refcount is unchanged.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 */
904
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000905struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700906 unsigned short mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907{
Pavel Emelianov7562f872007-05-03 15:13:45 -0700908 struct net_device *dev, *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909
Pavel Emelianov7562f872007-05-03 15:13:45 -0700910 ret = NULL;
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800911 for_each_netdev_rcu(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 if (((dev->flags ^ if_flags) & mask) == 0) {
Pavel Emelianov7562f872007-05-03 15:13:45 -0700913 ret = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 break;
915 }
916 }
Pavel Emelianov7562f872007-05-03 15:13:45 -0700917 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918}
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000919EXPORT_SYMBOL(dev_get_by_flags_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920
921/**
922 * dev_valid_name - check if name is okay for network device
923 * @name: name string
924 *
925 * Network device names need to be valid file names to
David S. Millerc7fa9d12006-08-15 16:34:13 -0700926 * to allow sysfs to work. We also disallow any kind of
927 * whitespace.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928 */
David S. Miller95f050b2012-03-06 16:12:15 -0500929bool dev_valid_name(const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930{
David S. Millerc7fa9d12006-08-15 16:34:13 -0700931 if (*name == '\0')
David S. Miller95f050b2012-03-06 16:12:15 -0500932 return false;
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -0700933 if (strlen(name) >= IFNAMSIZ)
David S. Miller95f050b2012-03-06 16:12:15 -0500934 return false;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700935 if (!strcmp(name, ".") || !strcmp(name, ".."))
David S. Miller95f050b2012-03-06 16:12:15 -0500936 return false;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700937
938 while (*name) {
939 if (*name == '/' || isspace(*name))
David S. Miller95f050b2012-03-06 16:12:15 -0500940 return false;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700941 name++;
942 }
David S. Miller95f050b2012-03-06 16:12:15 -0500943 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700945EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946
947/**
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200948 * __dev_alloc_name - allocate a name for a device
949 * @net: network namespace to allocate the device name in
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 * @name: name format string
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200951 * @buf: scratch buffer and result name string
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 *
953 * Passed a format string - eg "lt%d" it will try and find a suitable
Stephen Hemminger3041a062006-05-26 13:25:24 -0700954 * id. It scans list of devices to build up a free map, then chooses
955 * the first empty slot. The caller must hold the dev_base or rtnl lock
956 * while allocating the name and adding the device in order to avoid
957 * duplicates.
958 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
959 * Returns the number of the unit assigned or a negative errno code.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 */
961
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200962static int __dev_alloc_name(struct net *net, const char *name, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963{
964 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 const char *p;
966 const int max_netdevices = 8*PAGE_SIZE;
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700967 unsigned long *inuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968 struct net_device *d;
969
970 p = strnchr(name, IFNAMSIZ-1, '%');
971 if (p) {
972 /*
973 * Verify the string as this thing may have come from
974 * the user. There must be either one "%d" and no other "%"
975 * characters.
976 */
977 if (p[1] != 'd' || strchr(p + 2, '%'))
978 return -EINVAL;
979
980 /* Use one page as a bit array of possible slots */
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700981 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 if (!inuse)
983 return -ENOMEM;
984
Eric W. Biederman881d9662007-09-17 11:56:21 -0700985 for_each_netdev(net, d) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 if (!sscanf(d->name, name, &i))
987 continue;
988 if (i < 0 || i >= max_netdevices)
989 continue;
990
991 /* avoid cases where sscanf is not exact inverse of printf */
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200992 snprintf(buf, IFNAMSIZ, name, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 if (!strncmp(buf, d->name, IFNAMSIZ))
994 set_bit(i, inuse);
995 }
996
997 i = find_first_zero_bit(inuse, max_netdevices);
998 free_page((unsigned long) inuse);
999 }
1000
Octavian Purdilad9031022009-11-18 02:36:59 +00001001 if (buf != name)
1002 snprintf(buf, IFNAMSIZ, name, i);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001003 if (!__dev_get_by_name(net, buf))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
1006 /* It is possible to run out of possible slots
1007 * when the name is long and there isn't enough space left
1008 * for the digits, or if all bits are used.
1009 */
1010 return -ENFILE;
1011}
1012
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001013/**
1014 * dev_alloc_name - allocate a name for a device
1015 * @dev: device
1016 * @name: name format string
1017 *
1018 * Passed a format string - eg "lt%d" it will try and find a suitable
1019 * id. It scans list of devices to build up a free map, then chooses
1020 * the first empty slot. The caller must hold the dev_base or rtnl lock
1021 * while allocating the name and adding the device in order to avoid
1022 * duplicates.
1023 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1024 * Returns the number of the unit assigned or a negative errno code.
1025 */
1026
1027int dev_alloc_name(struct net_device *dev, const char *name)
1028{
1029 char buf[IFNAMSIZ];
1030 struct net *net;
1031 int ret;
1032
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001033 BUG_ON(!dev_net(dev));
1034 net = dev_net(dev);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001035 ret = __dev_alloc_name(net, name, buf);
1036 if (ret >= 0)
1037 strlcpy(dev->name, buf, IFNAMSIZ);
1038 return ret;
1039}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001040EXPORT_SYMBOL(dev_alloc_name);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001041
Gao feng828de4f2012-09-13 20:58:27 +00001042static int dev_alloc_name_ns(struct net *net,
1043 struct net_device *dev,
1044 const char *name)
Octavian Purdilad9031022009-11-18 02:36:59 +00001045{
Gao feng828de4f2012-09-13 20:58:27 +00001046 char buf[IFNAMSIZ];
1047 int ret;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001048
Gao feng828de4f2012-09-13 20:58:27 +00001049 ret = __dev_alloc_name(net, name, buf);
1050 if (ret >= 0)
1051 strlcpy(dev->name, buf, IFNAMSIZ);
1052 return ret;
1053}
1054
1055static int dev_get_valid_name(struct net *net,
1056 struct net_device *dev,
1057 const char *name)
1058{
1059 BUG_ON(!net);
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001060
Octavian Purdilad9031022009-11-18 02:36:59 +00001061 if (!dev_valid_name(name))
1062 return -EINVAL;
1063
Jiri Pirko1c5cae82011-04-30 01:21:32 +00001064 if (strchr(name, '%'))
Gao feng828de4f2012-09-13 20:58:27 +00001065 return dev_alloc_name_ns(net, dev, name);
Octavian Purdilad9031022009-11-18 02:36:59 +00001066 else if (__dev_get_by_name(net, name))
1067 return -EEXIST;
Daniel Lezcano8ce6cebc2010-05-19 10:12:19 +00001068 else if (dev->name != name)
1069 strlcpy(dev->name, name, IFNAMSIZ);
Octavian Purdilad9031022009-11-18 02:36:59 +00001070
1071 return 0;
1072}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073
1074/**
1075 * dev_change_name - change name of a device
1076 * @dev: device
1077 * @newname: name (or format string) must be at least IFNAMSIZ
1078 *
1079 * Change name of a device, can pass format strings "eth%d".
1080 * for wildcarding.
1081 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001082int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083{
Herbert Xufcc5a032007-07-30 17:03:38 -07001084 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -07001086 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001087 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088
1089 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001090 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001092 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 if (dev->flags & IFF_UP)
1094 return -EBUSY;
1095
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001096 write_seqcount_begin(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001097
1098 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001099 write_seqcount_end(&devnet_rename_seq);
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001100 return 0;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001101 }
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001102
Herbert Xufcc5a032007-07-30 17:03:38 -07001103 memcpy(oldname, dev->name, IFNAMSIZ);
1104
Gao feng828de4f2012-09-13 20:58:27 +00001105 err = dev_get_valid_name(net, dev, newname);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001106 if (err < 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001107 write_seqcount_end(&devnet_rename_seq);
Octavian Purdilad9031022009-11-18 02:36:59 +00001108 return err;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
Herbert Xufcc5a032007-07-30 17:03:38 -07001111rollback:
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001112 ret = device_rename(&dev->dev, dev->name);
1113 if (ret) {
1114 memcpy(dev->name, oldname, IFNAMSIZ);
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001115 write_seqcount_end(&devnet_rename_seq);
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001116 return ret;
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001117 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001118
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001119 write_seqcount_end(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001120
Herbert Xu7f988ea2007-07-30 16:35:46 -07001121 write_lock_bh(&dev_base_lock);
Eric Dumazet372b2312011-05-17 13:56:59 -04001122 hlist_del_rcu(&dev->name_hlist);
Eric Dumazet72c95282009-10-30 07:11:27 +00001123 write_unlock_bh(&dev_base_lock);
1124
1125 synchronize_rcu();
1126
1127 write_lock_bh(&dev_base_lock);
1128 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -07001129 write_unlock_bh(&dev_base_lock);
1130
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001131 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001132 ret = notifier_to_errno(ret);
1133
1134 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001135 /* err >= 0 after dev_alloc_name() or stores the first errno */
1136 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001137 err = ret;
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001138 write_seqcount_begin(&devnet_rename_seq);
Herbert Xufcc5a032007-07-30 17:03:38 -07001139 memcpy(dev->name, oldname, IFNAMSIZ);
1140 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001141 } else {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001142 pr_err("%s: name change rollback failed: %d\n",
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001143 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001144 }
1145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 return err;
1148}
1149
1150/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001151 * dev_set_alias - change ifalias of a device
1152 * @dev: device
1153 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001154 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001155 *
1156 * Set ifalias for a device,
1157 */
1158int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1159{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001160 char *new_ifalias;
1161
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001162 ASSERT_RTNL();
1163
1164 if (len >= IFALIASZ)
1165 return -EINVAL;
1166
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001167 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001168 kfree(dev->ifalias);
1169 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001170 return 0;
1171 }
1172
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001173 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1174 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001175 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001176 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001177
1178 strlcpy(dev->ifalias, alias, len+1);
1179 return len;
1180}
1181
1182
1183/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001184 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001185 * @dev: device to cause notification
1186 *
1187 * Called to indicate a device has changed features.
1188 */
1189void netdev_features_change(struct net_device *dev)
1190{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001191 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001192}
1193EXPORT_SYMBOL(netdev_features_change);
1194
1195/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 * netdev_state_change - device changes state
1197 * @dev: device to cause notification
1198 *
1199 * Called to indicate a device has changed state. This function calls
1200 * the notifier chains for netdev_chain and sends a NEWLINK message
1201 * to the routing socket.
1202 */
1203void netdev_state_change(struct net_device *dev)
1204{
1205 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001206 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1208 }
1209}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001210EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
Amerigo Wangee89bab2012-08-09 22:14:56 +00001212/**
1213 * netdev_notify_peers - notify network peers about existence of @dev
1214 * @dev: network device
1215 *
1216 * Generate traffic such that interested network peers are aware of
1217 * @dev, such as by generating a gratuitous ARP. This may be used when
1218 * a device wants to inform the rest of the network about some sort of
1219 * reconfiguration such as a failover event or virtual machine
1220 * migration.
1221 */
1222void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001223{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001224 rtnl_lock();
1225 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1226 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001227}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001228EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001229
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230/**
1231 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001232 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 * @name: name of interface
1234 *
1235 * If a network interface is not present and the process has suitable
1236 * privileges this function loads the module. If module loading is not
1237 * available in this kernel then it becomes a nop.
1238 */
1239
Eric W. Biederman881d9662007-09-17 11:56:21 -07001240void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001242 struct net_device *dev;
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001243 int no_module;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Eric Dumazet72c95282009-10-30 07:11:27 +00001245 rcu_read_lock();
1246 dev = dev_get_by_name_rcu(net, name);
1247 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001249 no_module = !dev;
1250 if (no_module && capable(CAP_NET_ADMIN))
1251 no_module = request_module("netdev-%s", name);
1252 if (no_module && capable(CAP_SYS_MODULE)) {
1253 if (!request_module("%s", name))
Vinson Lee7cecb522012-06-27 14:32:07 +00001254 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1255 name);
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03001256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001258EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
Patrick McHardybd380812010-02-26 06:34:53 +00001260static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001262 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001263 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001265 ASSERT_RTNL();
1266
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 if (!netif_device_present(dev))
1268 return -ENODEV;
1269
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001270 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1271 ret = notifier_to_errno(ret);
1272 if (ret)
1273 return ret;
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001276
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001277 if (ops->ndo_validate_addr)
1278 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001279
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001280 if (!ret && ops->ndo_open)
1281 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Jeff Garzikbada3392007-10-23 20:19:37 -07001283 if (ret)
1284 clear_bit(__LINK_STATE_START, &dev->state);
1285 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001287 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001288 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001290 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 return ret;
1294}
Patrick McHardybd380812010-02-26 06:34:53 +00001295
1296/**
1297 * dev_open - prepare an interface for use.
1298 * @dev: device to open
1299 *
1300 * Takes a device from down to up state. The device's private open
1301 * function is invoked and then the multicast lists are loaded. Finally
1302 * the device is moved into the up state and a %NETDEV_UP message is
1303 * sent to the netdev notifier chain.
1304 *
1305 * Calling this function on an active interface is a nop. On a failure
1306 * a negative errno code is returned.
1307 */
1308int dev_open(struct net_device *dev)
1309{
1310 int ret;
1311
Patrick McHardybd380812010-02-26 06:34:53 +00001312 if (dev->flags & IFF_UP)
1313 return 0;
1314
Patrick McHardybd380812010-02-26 06:34:53 +00001315 ret = __dev_open(dev);
1316 if (ret < 0)
1317 return ret;
1318
Patrick McHardybd380812010-02-26 06:34:53 +00001319 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1320 call_netdevice_notifiers(NETDEV_UP, dev);
1321
1322 return ret;
1323}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001324EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Octavian Purdila44345722010-12-13 12:44:07 +00001326static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327{
Octavian Purdila44345722010-12-13 12:44:07 +00001328 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001329
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001330 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001331 might_sleep();
1332
Octavian Purdila44345722010-12-13 12:44:07 +00001333 list_for_each_entry(dev, head, unreg_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001334 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Octavian Purdila44345722010-12-13 12:44:07 +00001336 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Octavian Purdila44345722010-12-13 12:44:07 +00001338 /* Synchronize to scheduled poll. We cannot touch poll list, it
1339 * can be even on different cpu. So just clear netif_running().
1340 *
1341 * dev->stop() will invoke napi_disable() on all of it's
1342 * napi_struct instances on this device.
1343 */
1344 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1345 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Octavian Purdila44345722010-12-13 12:44:07 +00001347 dev_deactivate_many(head);
1348
1349 list_for_each_entry(dev, head, unreg_list) {
1350 const struct net_device_ops *ops = dev->netdev_ops;
1351
1352 /*
1353 * Call the device specific close. This cannot fail.
1354 * Only if device is UP
1355 *
1356 * We allow it to be called even after a DETACH hot-plug
1357 * event.
1358 */
1359 if (ops->ndo_stop)
1360 ops->ndo_stop(dev);
1361
Octavian Purdila44345722010-12-13 12:44:07 +00001362 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001363 net_dmaengine_put();
1364 }
1365
1366 return 0;
1367}
1368
1369static int __dev_close(struct net_device *dev)
1370{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001371 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001372 LIST_HEAD(single);
1373
1374 list_add(&dev->unreg_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001375 retval = __dev_close_many(&single);
1376 list_del(&single);
1377 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001378}
1379
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001380static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001381{
1382 struct net_device *dev, *tmp;
1383 LIST_HEAD(tmp_list);
1384
1385 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1386 if (!(dev->flags & IFF_UP))
1387 list_move(&dev->unreg_list, &tmp_list);
1388
1389 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001390
Octavian Purdila44345722010-12-13 12:44:07 +00001391 list_for_each_entry(dev, head, unreg_list) {
1392 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1393 call_netdevice_notifiers(NETDEV_DOWN, dev);
1394 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Octavian Purdila44345722010-12-13 12:44:07 +00001396 /* rollback_registered_many needs the complete original list */
1397 list_splice(&tmp_list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 return 0;
1399}
Patrick McHardybd380812010-02-26 06:34:53 +00001400
1401/**
1402 * dev_close - shutdown an interface.
1403 * @dev: device to shutdown
1404 *
1405 * This function moves an active device into down state. A
1406 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1407 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1408 * chain.
1409 */
1410int dev_close(struct net_device *dev)
1411{
Eric Dumazete14a5992011-05-10 12:26:06 -07001412 if (dev->flags & IFF_UP) {
1413 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001414
Eric Dumazete14a5992011-05-10 12:26:06 -07001415 list_add(&dev->unreg_list, &single);
1416 dev_close_many(&single);
1417 list_del(&single);
1418 }
Patrick McHardybd380812010-02-26 06:34:53 +00001419 return 0;
1420}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001421EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
1423
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001424/**
1425 * dev_disable_lro - disable Large Receive Offload on a device
1426 * @dev: device
1427 *
1428 * Disable Large Receive Offload (LRO) on a net device. Must be
1429 * called under RTNL. This is needed if received packets may be
1430 * forwarded to another interface.
1431 */
1432void dev_disable_lro(struct net_device *dev)
1433{
Neil Hormanf11970e2011-05-24 08:31:09 +00001434 /*
1435 * If we're trying to disable lro on a vlan device
1436 * use the underlying physical device instead
1437 */
1438 if (is_vlan_dev(dev))
1439 dev = vlan_dev_real_dev(dev);
1440
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001441 dev->wanted_features &= ~NETIF_F_LRO;
1442 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001443
Michał Mirosław22d59692011-04-21 12:42:15 +00001444 if (unlikely(dev->features & NETIF_F_LRO))
1445 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001446}
1447EXPORT_SYMBOL(dev_disable_lro);
1448
1449
Eric W. Biederman881d9662007-09-17 11:56:21 -07001450static int dev_boot_phase = 1;
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452/**
1453 * register_netdevice_notifier - register a network notifier block
1454 * @nb: notifier
1455 *
1456 * Register a notifier to be called when network device events occur.
1457 * The notifier passed is linked into the kernel structures and must
1458 * not be reused until it has been unregistered. A negative errno code
1459 * is returned on a failure.
1460 *
1461 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001462 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 * view of the network device list.
1464 */
1465
1466int register_netdevice_notifier(struct notifier_block *nb)
1467{
1468 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001469 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001470 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 int err;
1472
1473 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001474 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001475 if (err)
1476 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001477 if (dev_boot_phase)
1478 goto unlock;
1479 for_each_net(net) {
1480 for_each_netdev(net, dev) {
1481 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1482 err = notifier_to_errno(err);
1483 if (err)
1484 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
Eric W. Biederman881d9662007-09-17 11:56:21 -07001486 if (!(dev->flags & IFF_UP))
1487 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001488
Eric W. Biederman881d9662007-09-17 11:56:21 -07001489 nb->notifier_call(nb, NETDEV_UP, dev);
1490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001492
1493unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 rtnl_unlock();
1495 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001496
1497rollback:
1498 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001499 for_each_net(net) {
1500 for_each_netdev(net, dev) {
1501 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001502 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001503
Eric W. Biederman881d9662007-09-17 11:56:21 -07001504 if (dev->flags & IFF_UP) {
1505 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1506 nb->notifier_call(nb, NETDEV_DOWN, dev);
1507 }
1508 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001509 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001510 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001511
RongQing.Li8f891482011-11-30 23:43:07 -05001512outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001513 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001514 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001516EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
1518/**
1519 * unregister_netdevice_notifier - unregister a network notifier block
1520 * @nb: notifier
1521 *
1522 * Unregister a notifier previously registered by
1523 * register_netdevice_notifier(). The notifier is unlinked into the
1524 * kernel structures and may then be reused. A negative errno code
1525 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001526 *
1527 * After unregistering unregister and down device events are synthesized
1528 * for all devices on the device list to the removed notifier to remove
1529 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 */
1531
1532int unregister_netdevice_notifier(struct notifier_block *nb)
1533{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001534 struct net_device *dev;
1535 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001536 int err;
1537
1538 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001539 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001540 if (err)
1541 goto unlock;
1542
1543 for_each_net(net) {
1544 for_each_netdev(net, dev) {
1545 if (dev->flags & IFF_UP) {
1546 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1547 nb->notifier_call(nb, NETDEV_DOWN, dev);
1548 }
1549 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001550 }
1551 }
1552unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001553 rtnl_unlock();
1554 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001556EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
1558/**
1559 * call_netdevice_notifiers - call all network notifier blocks
1560 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001561 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 *
1563 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001564 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 */
1566
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001567int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568{
Jiri Pirkoab930472010-04-20 01:45:37 -07001569 ASSERT_RTNL();
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001570 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001572EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Ingo Molnarc5905af2012-02-24 08:31:31 +01001574static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001575#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001576/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001577 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001578 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001579 */
1580static atomic_t netstamp_needed_deferred;
1581#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583void net_enable_timestamp(void)
1584{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001585#ifdef HAVE_JUMP_LABEL
1586 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1587
1588 if (deferred) {
1589 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001590 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001591 return;
1592 }
1593#endif
1594 WARN_ON(in_interrupt());
Ingo Molnarc5905af2012-02-24 08:31:31 +01001595 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001597EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599void net_disable_timestamp(void)
1600{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001601#ifdef HAVE_JUMP_LABEL
1602 if (in_interrupt()) {
1603 atomic_inc(&netstamp_needed_deferred);
1604 return;
1605 }
1606#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001607 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001609EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Eric Dumazet3b098e22010-05-15 23:57:10 -07001611static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612{
Eric Dumazet588f0332011-11-15 04:12:55 +00001613 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001614 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001615 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616}
1617
Eric Dumazet588f0332011-11-15 04:12:55 +00001618#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001619 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001620 if ((COND) && !(SKB)->tstamp.tv64) \
1621 __net_timestamp(SKB); \
1622 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001623
Richard Cochran4dc360c2011-10-19 17:00:35 -04001624static int net_hwtstamp_validate(struct ifreq *ifr)
1625{
1626 struct hwtstamp_config cfg;
1627 enum hwtstamp_tx_types tx_type;
1628 enum hwtstamp_rx_filters rx_filter;
1629 int tx_type_valid = 0;
1630 int rx_filter_valid = 0;
1631
1632 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1633 return -EFAULT;
1634
1635 if (cfg.flags) /* reserved for future extensions */
1636 return -EINVAL;
1637
1638 tx_type = cfg.tx_type;
1639 rx_filter = cfg.rx_filter;
1640
1641 switch (tx_type) {
1642 case HWTSTAMP_TX_OFF:
1643 case HWTSTAMP_TX_ON:
1644 case HWTSTAMP_TX_ONESTEP_SYNC:
1645 tx_type_valid = 1;
1646 break;
1647 }
1648
1649 switch (rx_filter) {
1650 case HWTSTAMP_FILTER_NONE:
1651 case HWTSTAMP_FILTER_ALL:
1652 case HWTSTAMP_FILTER_SOME:
1653 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1654 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1655 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1656 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1657 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1658 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1659 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1660 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1661 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1662 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1663 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1664 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1665 rx_filter_valid = 1;
1666 break;
1667 }
1668
1669 if (!tx_type_valid || !rx_filter_valid)
1670 return -ERANGE;
1671
1672 return 0;
1673}
1674
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001675static inline bool is_skb_forwardable(struct net_device *dev,
1676 struct sk_buff *skb)
1677{
1678 unsigned int len;
1679
1680 if (!(dev->flags & IFF_UP))
1681 return false;
1682
1683 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1684 if (skb->len <= len)
1685 return true;
1686
1687 /* if TSO is enabled, we don't care about the length as the packet
1688 * could be forwarded without being segmented before
1689 */
1690 if (skb_is_gso(skb))
1691 return true;
1692
1693 return false;
1694}
1695
Arnd Bergmann44540962009-11-26 06:07:08 +00001696/**
1697 * dev_forward_skb - loopback an skb to another netif
1698 *
1699 * @dev: destination network device
1700 * @skb: buffer to forward
1701 *
1702 * return values:
1703 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001704 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001705 *
1706 * dev_forward_skb can be used for injecting an skb from the
1707 * start_xmit function of one device into the receive queue
1708 * of another device.
1709 *
1710 * The receiving device may be in another namespace, so
1711 * we have to clear all information in the skb that could
1712 * impact namespace isolation.
1713 */
1714int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1715{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001716 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1717 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1718 atomic_long_inc(&dev->rx_dropped);
1719 kfree_skb(skb);
1720 return NET_RX_DROP;
1721 }
1722 }
1723
Arnd Bergmann44540962009-11-26 06:07:08 +00001724 skb_orphan(skb);
Ben Greearc736eef2010-07-22 09:54:47 +00001725 nf_reset(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001726
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001727 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001728 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001729 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001730 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001731 }
Benjamin LaHaise3b9785c2012-03-27 15:55:44 +00001732 skb->skb_iif = 0;
David S. Miller59b99972012-05-10 23:03:34 -04001733 skb->dev = dev;
1734 skb_dst_drop(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001735 skb->tstamp.tv64 = 0;
1736 skb->pkt_type = PACKET_HOST;
1737 skb->protocol = eth_type_trans(skb, dev);
David S. Miller59b99972012-05-10 23:03:34 -04001738 skb->mark = 0;
1739 secpath_reset(skb);
1740 nf_reset(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001741 return netif_rx(skb);
1742}
1743EXPORT_SYMBOL_GPL(dev_forward_skb);
1744
Changli Gao71d9dec2010-12-15 19:57:25 +00001745static inline int deliver_skb(struct sk_buff *skb,
1746 struct packet_type *pt_prev,
1747 struct net_device *orig_dev)
1748{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001749 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1750 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001751 atomic_inc(&skb->users);
1752 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1753}
1754
Eric Leblondc0de08d2012-08-16 22:02:58 +00001755static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1756{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001757 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001758 return false;
1759
1760 if (ptype->id_match)
1761 return ptype->id_match(ptype, skb->sk);
1762 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1763 return true;
1764
1765 return false;
1766}
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768/*
1769 * Support routine. Sends outgoing frames to any network
1770 * taps currently in use.
1771 */
1772
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001773static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774{
1775 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001776 struct sk_buff *skb2 = NULL;
1777 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 rcu_read_lock();
1780 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1781 /* Never send packets back to the socket
1782 * they originated from - MvS (miquels@drinkel.ow.org)
1783 */
1784 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001785 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001786 if (pt_prev) {
1787 deliver_skb(skb2, pt_prev, skb->dev);
1788 pt_prev = ptype;
1789 continue;
1790 }
1791
1792 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 if (!skb2)
1794 break;
1795
Eric Dumazet70978182010-12-20 21:22:51 +00001796 net_timestamp_set(skb2);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 /* skb->nh should be correctly
1799 set by sender, so that the second statement is
1800 just protection against buggy protocols.
1801 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001802 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001804 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001805 skb2->network_header > skb2->tail) {
Joe Perchese87cc472012-05-13 21:56:26 +00001806 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1807 ntohs(skb2->protocol),
1808 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001809 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 }
1811
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001812 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001814 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 }
1816 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001817 if (pt_prev)
1818 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 rcu_read_unlock();
1820}
1821
Ben Hutchings2c530402012-07-10 10:55:09 +00001822/**
1823 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001824 * @dev: Network device
1825 * @txq: number of queues available
1826 *
1827 * If real_num_tx_queues is changed the tc mappings may no longer be
1828 * valid. To resolve this verify the tc mapping remains valid and if
1829 * not NULL the mapping. With no priorities mapping to this
1830 * offset/count pair it will no longer be used. In the worst case TC0
1831 * is invalid nothing can be done so disable priority mappings. If is
1832 * expected that drivers will fix this mapping if they can before
1833 * calling netif_set_real_num_tx_queues.
1834 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001835static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001836{
1837 int i;
1838 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1839
1840 /* If TC0 is invalidated disable TC mapping */
1841 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001842 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001843 dev->num_tc = 0;
1844 return;
1845 }
1846
1847 /* Invalidated prio to tc mappings set to TC0 */
1848 for (i = 1; i < TC_BITMASK + 1; i++) {
1849 int q = netdev_get_prio_tc_map(dev, i);
1850
1851 tc = &dev->tc_to_txq[q];
1852 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001853 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1854 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001855 netdev_set_prio_tc_map(dev, i, 0);
1856 }
1857 }
1858}
1859
Alexander Duyck537c00d2013-01-10 08:57:02 +00001860#ifdef CONFIG_XPS
1861static DEFINE_MUTEX(xps_map_mutex);
1862#define xmap_dereference(P) \
1863 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1864
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001865static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1866 int cpu, u16 index)
1867{
1868 struct xps_map *map = NULL;
1869 int pos;
1870
1871 if (dev_maps)
1872 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1873
1874 for (pos = 0; map && pos < map->len; pos++) {
1875 if (map->queues[pos] == index) {
1876 if (map->len > 1) {
1877 map->queues[pos] = map->queues[--map->len];
1878 } else {
1879 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1880 kfree_rcu(map, rcu);
1881 map = NULL;
1882 }
1883 break;
1884 }
1885 }
1886
1887 return map;
1888}
1889
Alexander Duyck537c00d2013-01-10 08:57:02 +00001890void netif_reset_xps_queue(struct net_device *dev, u16 index)
1891{
1892 struct xps_dev_maps *dev_maps;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001893 int cpu;
1894 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001895
1896 mutex_lock(&xps_map_mutex);
1897 dev_maps = xmap_dereference(dev->xps_maps);
1898
1899 if (!dev_maps)
1900 goto out_no_maps;
1901
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001902 for_each_possible_cpu(cpu) {
1903 if (remove_xps_queue(dev_maps, cpu, index))
1904 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001905 }
1906
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001907 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001908 RCU_INIT_POINTER(dev->xps_maps, NULL);
1909 kfree_rcu(dev_maps, rcu);
1910 }
1911
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001912 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1913 NUMA_NO_NODE);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001914out_no_maps:
1915 mutex_unlock(&xps_map_mutex);
1916}
1917
1918int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1919{
1920 int i, cpu, pos, map_len, alloc_len, need_set;
1921 struct xps_map *map, *new_map;
1922 struct xps_dev_maps *dev_maps, *new_dev_maps;
1923 int nonempty = 0;
1924 int numa_node_id = -2;
1925 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1926
1927 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1928 if (!new_dev_maps)
1929 return -ENOMEM;
1930
1931 mutex_lock(&xps_map_mutex);
1932
1933 dev_maps = xmap_dereference(dev->xps_maps);
1934
1935 for_each_possible_cpu(cpu) {
1936 map = dev_maps ?
1937 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1938 new_map = map;
1939 if (map) {
1940 for (pos = 0; pos < map->len; pos++)
1941 if (map->queues[pos] == index)
1942 break;
1943 map_len = map->len;
1944 alloc_len = map->alloc_len;
1945 } else
1946 pos = map_len = alloc_len = 0;
1947
1948 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
1949#ifdef CONFIG_NUMA
1950 if (need_set) {
1951 if (numa_node_id == -2)
1952 numa_node_id = cpu_to_node(cpu);
1953 else if (numa_node_id != cpu_to_node(cpu))
1954 numa_node_id = -1;
1955 }
1956#endif
1957 if (need_set && pos >= map_len) {
1958 /* Need to add queue to this CPU's map */
1959 if (map_len >= alloc_len) {
1960 alloc_len = alloc_len ?
1961 2 * alloc_len : XPS_MIN_MAP_ALLOC;
1962 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
1963 GFP_KERNEL,
1964 cpu_to_node(cpu));
1965 if (!new_map)
1966 goto error;
1967 new_map->alloc_len = alloc_len;
1968 for (i = 0; i < map_len; i++)
1969 new_map->queues[i] = map->queues[i];
1970 new_map->len = map_len;
1971 }
1972 new_map->queues[new_map->len++] = index;
1973 } else if (!need_set && pos < map_len) {
1974 /* Need to remove queue from this CPU's map */
1975 if (map_len > 1)
1976 new_map->queues[pos] =
1977 new_map->queues[--new_map->len];
1978 else
1979 new_map = NULL;
1980 }
1981 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
1982 }
1983
1984 /* Cleanup old maps */
1985 for_each_possible_cpu(cpu) {
1986 map = dev_maps ?
1987 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1988 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
1989 kfree_rcu(map, rcu);
1990 if (new_dev_maps->cpu_map[cpu])
1991 nonempty = 1;
1992 }
1993
1994 if (nonempty) {
1995 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1996 } else {
1997 kfree(new_dev_maps);
1998 RCU_INIT_POINTER(dev->xps_maps, NULL);
1999 }
2000
2001 if (dev_maps)
2002 kfree_rcu(dev_maps, rcu);
2003
2004 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2005 (numa_node_id >= 0) ? numa_node_id :
2006 NUMA_NO_NODE);
2007
2008 mutex_unlock(&xps_map_mutex);
2009
2010 return 0;
2011error:
2012 mutex_unlock(&xps_map_mutex);
2013
2014 if (new_dev_maps)
2015 for_each_possible_cpu(i)
2016 kfree(rcu_dereference_protected(
2017 new_dev_maps->cpu_map[i],
2018 1));
2019 kfree(new_dev_maps);
2020 return -ENOMEM;
2021}
2022EXPORT_SYMBOL(netif_set_xps_queue);
2023
2024#endif
John Fastabendf0796d52010-07-01 13:21:57 +00002025/*
2026 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2027 * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
2028 */
Tom Herberte6484932010-10-18 18:04:39 +00002029int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002030{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002031 int rc;
2032
Tom Herberte6484932010-10-18 18:04:39 +00002033 if (txq < 1 || txq > dev->num_tx_queues)
2034 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002035
Ben Hutchings5c565802011-02-15 19:39:21 +00002036 if (dev->reg_state == NETREG_REGISTERED ||
2037 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002038 ASSERT_RTNL();
2039
Tom Herbert1d24eb42010-11-21 13:17:27 +00002040 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2041 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002042 if (rc)
2043 return rc;
2044
John Fastabend4f57c082011-01-17 08:06:04 +00002045 if (dev->num_tc)
2046 netif_setup_tc(dev, txq);
2047
Tom Herberte6484932010-10-18 18:04:39 +00002048 if (txq < dev->real_num_tx_queues)
2049 qdisc_reset_all_tx_gt(dev, txq);
John Fastabendf0796d52010-07-01 13:21:57 +00002050 }
Tom Herberte6484932010-10-18 18:04:39 +00002051
2052 dev->real_num_tx_queues = txq;
2053 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002054}
2055EXPORT_SYMBOL(netif_set_real_num_tx_queues);
Denis Vlasenko56079432006-03-29 15:57:29 -08002056
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002057#ifdef CONFIG_RPS
2058/**
2059 * netif_set_real_num_rx_queues - set actual number of RX queues used
2060 * @dev: Network device
2061 * @rxq: Actual number of RX queues
2062 *
2063 * This must be called either with the rtnl_lock held or before
2064 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002065 * negative error code. If called before registration, it always
2066 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002067 */
2068int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2069{
2070 int rc;
2071
Tom Herbertbd25fa72010-10-18 18:00:16 +00002072 if (rxq < 1 || rxq > dev->num_rx_queues)
2073 return -EINVAL;
2074
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002075 if (dev->reg_state == NETREG_REGISTERED) {
2076 ASSERT_RTNL();
2077
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002078 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2079 rxq);
2080 if (rc)
2081 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002082 }
2083
2084 dev->real_num_rx_queues = rxq;
2085 return 0;
2086}
2087EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2088#endif
2089
Ben Hutchings2c530402012-07-10 10:55:09 +00002090/**
2091 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002092 *
2093 * This routine should set an upper limit on the number of RSS queues
2094 * used by default by multiqueue devices.
2095 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002096int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002097{
2098 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2099}
2100EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2101
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002102static inline void __netif_reschedule(struct Qdisc *q)
2103{
2104 struct softnet_data *sd;
2105 unsigned long flags;
2106
2107 local_irq_save(flags);
2108 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002109 q->next_sched = NULL;
2110 *sd->output_queue_tailp = q;
2111 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002112 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2113 local_irq_restore(flags);
2114}
2115
David S. Miller37437bb2008-07-16 02:15:04 -07002116void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002117{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002118 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2119 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002120}
2121EXPORT_SYMBOL(__netif_schedule);
2122
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002123void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002124{
David S. Miller3578b0c2010-08-03 00:24:04 -07002125 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002126 struct softnet_data *sd;
2127 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002128
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002129 local_irq_save(flags);
2130 sd = &__get_cpu_var(softnet_data);
2131 skb->next = sd->completion_queue;
2132 sd->completion_queue = skb;
2133 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2134 local_irq_restore(flags);
2135 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002136}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002137EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002138
2139void dev_kfree_skb_any(struct sk_buff *skb)
2140{
2141 if (in_irq() || irqs_disabled())
2142 dev_kfree_skb_irq(skb);
2143 else
2144 dev_kfree_skb(skb);
2145}
2146EXPORT_SYMBOL(dev_kfree_skb_any);
2147
2148
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002149/**
2150 * netif_device_detach - mark device as removed
2151 * @dev: network device
2152 *
2153 * Mark device as removed from system and therefore no longer available.
2154 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002155void netif_device_detach(struct net_device *dev)
2156{
2157 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2158 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002159 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002160 }
2161}
2162EXPORT_SYMBOL(netif_device_detach);
2163
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002164/**
2165 * netif_device_attach - mark device as attached
2166 * @dev: network device
2167 *
2168 * Mark device as attached from system and restart if needed.
2169 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002170void netif_device_attach(struct net_device *dev)
2171{
2172 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2173 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002174 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002175 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002176 }
2177}
2178EXPORT_SYMBOL(netif_device_attach);
2179
Ben Hutchings36c92472012-01-17 07:57:56 +00002180static void skb_warn_bad_offload(const struct sk_buff *skb)
2181{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002182 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002183 struct net_device *dev = skb->dev;
2184 const char *driver = "";
2185
2186 if (dev && dev->dev.parent)
2187 driver = dev_driver_string(dev->dev.parent);
2188
2189 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2190 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002191 driver, dev ? &dev->features : &null_features,
2192 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002193 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2194 skb_shinfo(skb)->gso_type, skb->ip_summed);
2195}
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197/*
2198 * Invalidate hardware checksum when packet is to be mangled, and
2199 * complete checksum manually on outgoing path.
2200 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002201int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202{
Al Virod3bc23e2006-11-14 21:24:49 -08002203 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002204 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Patrick McHardy84fa7932006-08-29 16:44:56 -07002206 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002207 goto out_set_summed;
2208
2209 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002210 skb_warn_bad_offload(skb);
2211 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 }
2213
Michał Mirosław55508d62010-12-14 15:24:08 +00002214 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002215 BUG_ON(offset >= skb_headlen(skb));
2216 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2217
2218 offset += skb->csum_offset;
2219 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2220
2221 if (skb_cloned(skb) &&
2222 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2224 if (ret)
2225 goto out;
2226 }
2227
Herbert Xua0308472007-10-15 01:47:15 -07002228 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002229out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002231out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 return ret;
2233}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002234EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002236/**
2237 * skb_gso_segment - Perform segmentation on skb.
2238 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002239 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002240 *
2241 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002242 *
2243 * It may return NULL if the skb requires no segmentation. This is
2244 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002245 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002246struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2247 netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002248{
2249 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
Vlad Yasevich22061d82012-11-15 08:49:11 +00002250 struct packet_offload *ptype;
Al Viro252e3342006-11-14 20:48:11 -08002251 __be16 type = skb->protocol;
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002252 int vlan_depth = ETH_HLEN;
Herbert Xua430a432006-07-08 13:34:56 -07002253 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002254
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002255 while (type == htons(ETH_P_8021Q)) {
2256 struct vlan_hdr *vh;
Jesse Gross7b9c6092010-10-20 13:56:04 +00002257
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002258 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
Jesse Gross7b9c6092010-10-20 13:56:04 +00002259 return ERR_PTR(-EINVAL);
2260
Jesse Grossc8d5bcd2010-10-29 12:14:54 +00002261 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2262 type = vh->h_vlan_encapsulated_proto;
2263 vlan_depth += VLAN_HLEN;
Jesse Gross7b9c6092010-10-20 13:56:04 +00002264 }
2265
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07002266 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002267 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002268 __skb_pull(skb, skb->mac_len);
2269
Herbert Xu67fd1a72009-01-19 16:26:44 -08002270 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002271 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002272
Herbert Xua430a432006-07-08 13:34:56 -07002273 if (skb_header_cloned(skb) &&
2274 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2275 return ERR_PTR(err);
2276 }
2277
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002278 rcu_read_lock();
Vlad Yasevich22061d82012-11-15 08:49:11 +00002279 list_for_each_entry_rcu(ptype, &offload_base, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002280 if (ptype->type == type && ptype->callbacks.gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07002281 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002282 err = ptype->callbacks.gso_send_check(skb);
Herbert Xua430a432006-07-08 13:34:56 -07002283 segs = ERR_PTR(err);
2284 if (err || skb_gso_ok(skb, features))
2285 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07002286 __skb_push(skb, (skb->data -
2287 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07002288 }
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002289 segs = ptype->callbacks.gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002290 break;
2291 }
2292 }
2293 rcu_read_unlock();
2294
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07002295 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07002296
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002297 return segs;
2298}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002299EXPORT_SYMBOL(skb_gso_segment);
2300
Herbert Xufb286bb2005-11-10 13:01:24 -08002301/* Take action when hardware reception checksum errors are detected. */
2302#ifdef CONFIG_BUG
2303void netdev_rx_csum_fault(struct net_device *dev)
2304{
2305 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002306 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002307 dump_stack();
2308 }
2309}
2310EXPORT_SYMBOL(netdev_rx_csum_fault);
2311#endif
2312
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313/* Actually, we should eliminate this check as soon as we know, that:
2314 * 1. IOMMU is present and allows to map all the memory.
2315 * 2. No high memory really exists on this machine.
2316 */
2317
Eric Dumazet9092c652010-04-02 13:34:49 -07002318static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002320#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002322 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002323 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2324 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2325 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002326 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002327 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002328 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002330 if (PCI_DMA_BUS_IS_PHYS) {
2331 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Eric Dumazet9092c652010-04-02 13:34:49 -07002333 if (!pdev)
2334 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002335 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002336 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2337 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002338 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2339 return 1;
2340 }
2341 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002342#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 return 0;
2344}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002346struct dev_gso_cb {
2347 void (*destructor)(struct sk_buff *skb);
2348};
2349
2350#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2351
2352static void dev_gso_skb_destructor(struct sk_buff *skb)
2353{
2354 struct dev_gso_cb *cb;
2355
2356 do {
2357 struct sk_buff *nskb = skb->next;
2358
2359 skb->next = nskb->next;
2360 nskb->next = NULL;
2361 kfree_skb(nskb);
2362 } while (skb->next);
2363
2364 cb = DEV_GSO_CB(skb);
2365 if (cb->destructor)
2366 cb->destructor(skb);
2367}
2368
2369/**
2370 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2371 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002372 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002373 *
2374 * This function segments the given skb and stores the list of segments
2375 * in skb->next.
2376 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002377static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002378{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002379 struct sk_buff *segs;
2380
Herbert Xu576a30e2006-06-27 13:22:38 -07002381 segs = skb_gso_segment(skb, features);
2382
2383 /* Verifying header integrity only. */
2384 if (!segs)
2385 return 0;
2386
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002387 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002388 return PTR_ERR(segs);
2389
2390 skb->next = segs;
2391 DEV_GSO_CB(skb)->destructor = skb->destructor;
2392 skb->destructor = dev_gso_skb_destructor;
2393
2394 return 0;
2395}
2396
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002397static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
Jesse Gross03634662011-01-09 06:23:35 +00002398{
2399 return ((features & NETIF_F_GEN_CSUM) ||
2400 ((features & NETIF_F_V4_CSUM) &&
2401 protocol == htons(ETH_P_IP)) ||
2402 ((features & NETIF_F_V6_CSUM) &&
2403 protocol == htons(ETH_P_IPV6)) ||
2404 ((features & NETIF_F_FCOE_CRC) &&
2405 protocol == htons(ETH_P_FCOE)));
2406}
2407
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002408static netdev_features_t harmonize_features(struct sk_buff *skb,
2409 __be16 protocol, netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002410{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002411 if (skb->ip_summed != CHECKSUM_NONE &&
2412 !can_checksum_protocol(features, protocol)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002413 features &= ~NETIF_F_ALL_CSUM;
2414 features &= ~NETIF_F_SG;
2415 } else if (illegal_highdma(skb->dev, skb)) {
2416 features &= ~NETIF_F_SG;
2417 }
2418
2419 return features;
2420}
2421
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002422netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002423{
2424 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002425 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002426
Ben Hutchings30b678d2012-07-30 15:57:00 +00002427 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2428 features &= ~NETIF_F_GSO_MASK;
2429
Jesse Gross58e998c2010-10-29 12:14:55 +00002430 if (protocol == htons(ETH_P_8021Q)) {
2431 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2432 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002433 } else if (!vlan_tx_tag_present(skb)) {
2434 return harmonize_features(skb, protocol, features);
2435 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002436
Jesse Gross6ee400a2011-01-17 20:46:00 +00002437 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002438
2439 if (protocol != htons(ETH_P_8021Q)) {
2440 return harmonize_features(skb, protocol, features);
2441 } else {
2442 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Jesse Gross6ee400a2011-01-17 20:46:00 +00002443 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
Jesse Grossf01a5232011-01-09 06:23:31 +00002444 return harmonize_features(skb, protocol, features);
2445 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002446}
Jesse Grossf01a5232011-01-09 06:23:31 +00002447EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002448
John Fastabend6afff0c2010-06-16 14:18:12 +00002449/*
2450 * Returns true if either:
2451 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002452 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002453 */
2454static inline int skb_needs_linearize(struct sk_buff *skb,
Jesse Gross02932ce2011-01-09 06:23:34 +00002455 int features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002456{
Jesse Gross02932ce2011-01-09 06:23:34 +00002457 return skb_is_nonlinear(skb) &&
2458 ((skb_has_frag_list(skb) &&
2459 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002460 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002461 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002462}
2463
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002464int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2465 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002466{
Stephen Hemminger00829822008-11-20 20:14:53 -08002467 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002468 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002469 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002470
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002471 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002472 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002473
Eric Dumazet93f154b2009-05-18 22:19:19 -07002474 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002475 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002476 * its hot in this cpu cache
2477 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002478 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2479 skb_dst_drop(skb);
2480
Jesse Grossfc741212011-01-09 06:23:32 +00002481 features = netif_skb_features(skb);
2482
Jesse Gross7b9c6092010-10-20 13:56:04 +00002483 if (vlan_tx_tag_present(skb) &&
Jesse Grossfc741212011-01-09 06:23:32 +00002484 !(features & NETIF_F_HW_VLAN_TX)) {
Jesse Gross7b9c6092010-10-20 13:56:04 +00002485 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2486 if (unlikely(!skb))
2487 goto out;
2488
2489 skb->vlan_tci = 0;
2490 }
2491
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002492 /* If encapsulation offload request, verify we are testing
2493 * hardware encapsulation features instead of standard
2494 * features for the netdev
2495 */
2496 if (skb->encapsulation)
2497 features &= dev->hw_enc_features;
2498
Jesse Grossfc741212011-01-09 06:23:32 +00002499 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002500 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002501 goto out_kfree_skb;
2502 if (skb->next)
2503 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002504 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002505 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002506 __skb_linearize(skb))
2507 goto out_kfree_skb;
2508
2509 /* If packet is not checksummed and device does not
2510 * support checksumming for this protocol, complete
2511 * checksumming here.
2512 */
2513 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002514 if (skb->encapsulation)
2515 skb_set_inner_transport_header(skb,
2516 skb_checksum_start_offset(skb));
2517 else
2518 skb_set_transport_header(skb,
2519 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002520 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002521 skb_checksum_help(skb))
2522 goto out_kfree_skb;
2523 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002524 }
2525
Eric Dumazetb40863c2012-09-18 20:44:49 +00002526 if (!list_empty(&ptype_all))
2527 dev_queue_xmit_nit(skb, dev);
2528
Koki Sanagiec764bf2011-05-30 21:48:34 +00002529 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002530 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002531 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002532 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002533 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002534 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002535 }
2536
Herbert Xu576a30e2006-06-27 13:22:38 -07002537gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002538 do {
2539 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002540
2541 skb->next = nskb->next;
2542 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002543
2544 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002545 * If device doesn't need nskb->dst, release it right now while
Krishna Kumar068a2de2009-12-09 20:59:58 +00002546 * its hot in this cpu cache
2547 */
2548 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2549 skb_dst_drop(nskb);
2550
Eric Dumazetb40863c2012-09-18 20:44:49 +00002551 if (!list_empty(&ptype_all))
2552 dev_queue_xmit_nit(nskb, dev);
2553
Koki Sanagiec764bf2011-05-30 21:48:34 +00002554 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002555 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002556 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002557 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002558 if (rc & ~NETDEV_TX_MASK)
2559 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002560 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002561 skb->next = nskb;
2562 return rc;
2563 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002564 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002565 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002566 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002567 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002568
Patrick McHardy572a9d72009-11-10 06:14:14 +00002569out_kfree_gso_skb:
2570 if (likely(skb->next == NULL))
2571 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002572out_kfree_skb:
2573 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002574out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002575 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002576}
2577
Tom Herbert0a9627f2010-03-16 08:03:29 +00002578static u32 hashrnd __read_mostly;
David S. Millerb6b2fed2008-07-21 09:48:06 -07002579
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002580/*
2581 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2582 * to be used as a distribution range.
2583 */
2584u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2585 unsigned int num_tx_queues)
David S. Miller8f0f2222008-07-15 03:47:03 -07002586{
David S. Miller70192982009-01-27 16:34:47 -08002587 u32 hash;
John Fastabend4f57c082011-01-17 08:06:04 +00002588 u16 qoffset = 0;
2589 u16 qcount = num_tx_queues;
David S. Millerb6b2fed2008-07-21 09:48:06 -07002590
David S. Miller513de112009-05-03 14:43:10 -07002591 if (skb_rx_queue_recorded(skb)) {
2592 hash = skb_get_rx_queue(skb);
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002593 while (unlikely(hash >= num_tx_queues))
2594 hash -= num_tx_queues;
David S. Miller513de112009-05-03 14:43:10 -07002595 return hash;
2596 }
Eric Dumazetec581f62009-05-01 09:05:06 -07002597
John Fastabend4f57c082011-01-17 08:06:04 +00002598 if (dev->num_tc) {
2599 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2600 qoffset = dev->tc_to_txq[tc].offset;
2601 qcount = dev->tc_to_txq[tc].count;
2602 }
2603
Eric Dumazetec581f62009-05-01 09:05:06 -07002604 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08002605 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07002606 else
Eric Dumazet62b1a8a2012-06-14 06:42:44 +00002607 hash = (__force u16) skb->protocol;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002608 hash = jhash_1word(hash, hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08002609
John Fastabend4f57c082011-01-17 08:06:04 +00002610 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
David S. Miller8f0f2222008-07-15 03:47:03 -07002611}
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002612EXPORT_SYMBOL(__skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07002613
Eric Dumazeted046422009-11-13 21:54:04 +00002614static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2615{
2616 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
Joe Perchese87cc472012-05-13 21:56:26 +00002617 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2618 dev->name, queue_index,
2619 dev->real_num_tx_queues);
Eric Dumazeted046422009-11-13 21:54:04 +00002620 return 0;
2621 }
2622 return queue_index;
2623}
2624
Tom Herbert1d24eb42010-11-21 13:17:27 +00002625static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2626{
Tom Herbertbf264142010-11-26 08:36:09 +00002627#ifdef CONFIG_XPS
Tom Herbert1d24eb42010-11-21 13:17:27 +00002628 struct xps_dev_maps *dev_maps;
2629 struct xps_map *map;
2630 int queue_index = -1;
2631
2632 rcu_read_lock();
2633 dev_maps = rcu_dereference(dev->xps_maps);
2634 if (dev_maps) {
2635 map = rcu_dereference(
2636 dev_maps->cpu_map[raw_smp_processor_id()]);
2637 if (map) {
2638 if (map->len == 1)
2639 queue_index = map->queues[0];
2640 else {
2641 u32 hash;
2642 if (skb->sk && skb->sk->sk_hash)
2643 hash = skb->sk->sk_hash;
2644 else
2645 hash = (__force u16) skb->protocol ^
2646 skb->rxhash;
2647 hash = jhash_1word(hash, hashrnd);
2648 queue_index = map->queues[
2649 ((u64)hash * map->len) >> 32];
2650 }
2651 if (unlikely(queue_index >= dev->real_num_tx_queues))
2652 queue_index = -1;
2653 }
2654 }
2655 rcu_read_unlock();
2656
2657 return queue_index;
2658#else
2659 return -1;
2660#endif
2661}
2662
Alexander Duyck416186f2013-01-10 08:56:51 +00002663u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2664{
2665 struct sock *sk = skb->sk;
2666 int queue_index = sk_tx_queue_get(sk);
2667
2668 if (queue_index < 0 || skb->ooo_okay ||
2669 queue_index >= dev->real_num_tx_queues) {
2670 int new_index = get_xps_queue(dev, skb);
2671 if (new_index < 0)
2672 new_index = skb_tx_hash(dev, skb);
2673
2674 if (queue_index != new_index && sk) {
2675 struct dst_entry *dst =
2676 rcu_dereference_check(sk->sk_dst_cache, 1);
2677
2678 if (dst && skb_dst(skb) == dst)
2679 sk_tx_queue_set(sk, queue_index);
2680
2681 }
2682
2683 queue_index = new_index;
2684 }
2685
2686 return queue_index;
2687}
2688
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002689struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2690 struct sk_buff *skb)
David S. Millere8a04642008-07-17 00:34:19 -07002691{
Alexander Duyck416186f2013-01-10 08:56:51 +00002692 int queue_index = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002693
Alexander Duyck416186f2013-01-10 08:56:51 +00002694 if (dev->real_num_tx_queues != 1) {
2695 const struct net_device_ops *ops = dev->netdev_ops;
2696 if (ops->ndo_select_queue)
2697 queue_index = ops->ndo_select_queue(dev, skb);
2698 else
2699 queue_index = __netdev_pick_tx(dev, skb);
Helmut Schaadeabc772010-09-03 02:39:56 +00002700 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002701 }
David S. Millereae792b2008-07-15 03:03:33 -07002702
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002703 skb_set_queue_mapping(skb, queue_index);
2704 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07002705}
2706
Eric Dumazet1def9232013-01-10 12:36:42 +00002707static void qdisc_pkt_len_init(struct sk_buff *skb)
2708{
2709 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2710
2711 qdisc_skb_cb(skb)->pkt_len = skb->len;
2712
2713 /* To get more precise estimation of bytes sent on wire,
2714 * we add to pkt_len the headers size of all segments
2715 */
2716 if (shinfo->gso_size) {
2717 unsigned int hdr_len = skb_transport_offset(skb);
2718
2719 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2720 hdr_len += tcp_hdrlen(skb);
2721 else
2722 hdr_len += sizeof(struct udphdr);
2723 qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
2724 }
2725}
2726
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002727static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2728 struct net_device *dev,
2729 struct netdev_queue *txq)
2730{
2731 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002732 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002733 int rc;
2734
Eric Dumazet1def9232013-01-10 12:36:42 +00002735 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002736 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002737 /*
2738 * Heuristic to force contended enqueues to serialize on a
2739 * separate lock before trying to get qdisc main lock.
2740 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2741 * and dequeue packets faster.
2742 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002743 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002744 if (unlikely(contended))
2745 spin_lock(&q->busylock);
2746
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002747 spin_lock(root_lock);
2748 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2749 kfree_skb(skb);
2750 rc = NET_XMIT_DROP;
2751 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002752 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002753 /*
2754 * This is a work-conserving queue; there are no old skbs
2755 * waiting to be sent out; and the qdisc is not running -
2756 * xmit the skb directly.
2757 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002758 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2759 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002760
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002761 qdisc_bstats_update(q, skb);
2762
Eric Dumazet79640a42010-06-02 05:09:29 -07002763 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2764 if (unlikely(contended)) {
2765 spin_unlock(&q->busylock);
2766 contended = false;
2767 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002768 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002769 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002770 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002771
2772 rc = NET_XMIT_SUCCESS;
2773 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002774 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002775 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002776 if (qdisc_run_begin(q)) {
2777 if (unlikely(contended)) {
2778 spin_unlock(&q->busylock);
2779 contended = false;
2780 }
2781 __qdisc_run(q);
2782 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002783 }
2784 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002785 if (unlikely(contended))
2786 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002787 return rc;
2788}
2789
Neil Horman5bc14212011-11-22 05:10:51 +00002790#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2791static void skb_update_prio(struct sk_buff *skb)
2792{
Igor Maravic6977a792011-11-25 07:44:54 +00002793 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002794
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002795 if (!skb->priority && skb->sk && map) {
2796 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2797
2798 if (prioidx < map->priomap_len)
2799 skb->priority = map->priomap[prioidx];
2800 }
Neil Horman5bc14212011-11-22 05:10:51 +00002801}
2802#else
2803#define skb_update_prio(skb)
2804#endif
2805
Eric Dumazet745e20f2010-09-29 13:23:09 -07002806static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002807#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002808
Dave Jonesd29f7492008-07-22 14:09:06 -07002809/**
Michel Machado95603e22012-06-12 10:16:35 +00002810 * dev_loopback_xmit - loop back @skb
2811 * @skb: buffer to transmit
2812 */
2813int dev_loopback_xmit(struct sk_buff *skb)
2814{
2815 skb_reset_mac_header(skb);
2816 __skb_pull(skb, skb_network_offset(skb));
2817 skb->pkt_type = PACKET_LOOPBACK;
2818 skb->ip_summed = CHECKSUM_UNNECESSARY;
2819 WARN_ON(!skb_dst(skb));
2820 skb_dst_force(skb);
2821 netif_rx_ni(skb);
2822 return 0;
2823}
2824EXPORT_SYMBOL(dev_loopback_xmit);
2825
2826/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002827 * dev_queue_xmit - transmit a buffer
2828 * @skb: buffer to transmit
2829 *
2830 * Queue a buffer for transmission to a network device. The caller must
2831 * have set the device and priority and built the buffer before calling
2832 * this function. The function can be called from an interrupt.
2833 *
2834 * A negative errno code is returned on a failure. A success does not
2835 * guarantee the frame will be transmitted as it may be dropped due
2836 * to congestion or traffic shaping.
2837 *
2838 * -----------------------------------------------------------------------------------
2839 * I notice this method can also return errors from the queue disciplines,
2840 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2841 * be positive.
2842 *
2843 * Regardless of the return value, the skb is consumed, so it is currently
2844 * difficult to retry a send to this method. (You can bump the ref count
2845 * before sending to hold a reference for retry if you are careful.)
2846 *
2847 * When calling this method, interrupts MUST be enabled. This is because
2848 * the BH enable code must have IRQs enabled so that it will not deadlock.
2849 * --BLG
2850 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851int dev_queue_xmit(struct sk_buff *skb)
2852{
2853 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002854 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 struct Qdisc *q;
2856 int rc = -ENOMEM;
2857
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002858 /* Disable soft irqs for various locks below. Also
2859 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002861 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862
Neil Horman5bc14212011-11-22 05:10:51 +00002863 skb_update_prio(skb);
2864
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002865 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002866 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002867
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002869 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002871 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002873 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002874 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 }
2876
2877 /* The device has no queue. Common case for software devices:
2878 loopback, all the sorts of tunnels...
2879
Herbert Xu932ff272006-06-09 12:20:56 -07002880 Really, it is unlikely that netif_tx_lock protection is necessary
2881 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 counters.)
2883 However, it is possible, that they rely on protection
2884 made by us here.
2885
2886 Check this and shot the lock. It is not prone from deadlocks.
2887 Either shot noqueue qdisc, it is even simpler 8)
2888 */
2889 if (dev->flags & IFF_UP) {
2890 int cpu = smp_processor_id(); /* ok because BHs are off */
2891
David S. Millerc773e842008-07-08 23:13:53 -07002892 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893
Eric Dumazet745e20f2010-09-29 13:23:09 -07002894 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2895 goto recursion_alert;
2896
David S. Millerc773e842008-07-08 23:13:53 -07002897 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898
Tom Herbert734664982011-11-28 16:32:44 +00002899 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002900 __this_cpu_inc(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002901 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002902 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002903 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002904 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 goto out;
2906 }
2907 }
David S. Millerc773e842008-07-08 23:13:53 -07002908 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002909 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2910 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 } else {
2912 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002913 * unfortunately
2914 */
2915recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002916 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2917 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 }
2919 }
2920
2921 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002922 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 kfree_skb(skb);
2925 return rc;
2926out:
Herbert Xud4828d82006-06-22 02:28:18 -07002927 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 return rc;
2929}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002930EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932
2933/*=======================================================================
2934 Receiver routines
2935 =======================================================================*/
2936
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002937int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002938EXPORT_SYMBOL(netdev_max_backlog);
2939
Eric Dumazet3b098e22010-05-15 23:57:10 -07002940int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002941int netdev_budget __read_mostly = 300;
2942int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002944/* Called with irq disabled */
2945static inline void ____napi_schedule(struct softnet_data *sd,
2946 struct napi_struct *napi)
2947{
2948 list_add_tail(&napi->poll_list, &sd->poll_list);
2949 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2950}
2951
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002952/*
2953 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
Tom Herbertbdeab992011-08-14 19:45:55 +00002954 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2955 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
2956 * if hash is a canonical 4-tuple hash over transport ports.
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002957 */
Tom Herbertbdeab992011-08-14 19:45:55 +00002958void __skb_get_rxhash(struct sk_buff *skb)
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002959{
Eric Dumazet4504b862011-11-28 05:23:23 +00002960 struct flow_keys keys;
2961 u32 hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002962
Eric Dumazet4504b862011-11-28 05:23:23 +00002963 if (!skb_flow_dissect(skb, &keys))
2964 return;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002965
Chema Gonzalez68622342012-09-07 13:40:50 +00002966 if (keys.ports)
Eric Dumazet4504b862011-11-28 05:23:23 +00002967 skb->l4_rxhash = 1;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002968
2969 /* get a consistent hash (same value on both flow directions) */
Chema Gonzalez68622342012-09-07 13:40:50 +00002970 if (((__force u32)keys.dst < (__force u32)keys.src) ||
2971 (((__force u32)keys.dst == (__force u32)keys.src) &&
2972 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
Eric Dumazet4504b862011-11-28 05:23:23 +00002973 swap(keys.dst, keys.src);
Chema Gonzalez68622342012-09-07 13:40:50 +00002974 swap(keys.port16[0], keys.port16[1]);
2975 }
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002976
Eric Dumazet4504b862011-11-28 05:23:23 +00002977 hash = jhash_3words((__force u32)keys.dst,
2978 (__force u32)keys.src,
2979 (__force u32)keys.ports, hashrnd);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002980 if (!hash)
2981 hash = 1;
2982
Tom Herbertbdeab992011-08-14 19:45:55 +00002983 skb->rxhash = hash;
Krishna Kumarbfb564e2010-08-04 06:15:52 +00002984}
2985EXPORT_SYMBOL(__skb_get_rxhash);
2986
Eric Dumazetdf334542010-03-24 19:13:54 +00002987#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002988
2989/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002990struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002991EXPORT_SYMBOL(rps_sock_flow_table);
2992
Ingo Molnarc5905af2012-02-24 08:31:31 +01002993struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002994
Ben Hutchingsc4454772011-01-19 11:03:53 +00002995static struct rps_dev_flow *
2996set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2997 struct rps_dev_flow *rflow, u16 next_cpu)
2998{
Ben Hutchings09994d12011-10-03 04:42:46 +00002999 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00003000#ifdef CONFIG_RFS_ACCEL
3001 struct netdev_rx_queue *rxqueue;
3002 struct rps_dev_flow_table *flow_table;
3003 struct rps_dev_flow *old_rflow;
3004 u32 flow_id;
3005 u16 rxq_index;
3006 int rc;
3007
3008 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003009 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3010 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003011 goto out;
3012 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3013 if (rxq_index == skb_get_rx_queue(skb))
3014 goto out;
3015
3016 rxqueue = dev->_rx + rxq_index;
3017 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3018 if (!flow_table)
3019 goto out;
3020 flow_id = skb->rxhash & flow_table->mask;
3021 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3022 rxq_index, flow_id);
3023 if (rc < 0)
3024 goto out;
3025 old_rflow = rflow;
3026 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003027 rflow->filter = rc;
3028 if (old_rflow->filter == rflow->filter)
3029 old_rflow->filter = RPS_NO_FILTER;
3030 out:
3031#endif
3032 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003033 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003034 }
3035
Ben Hutchings09994d12011-10-03 04:42:46 +00003036 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003037 return rflow;
3038}
3039
Tom Herbert0a9627f2010-03-16 08:03:29 +00003040/*
3041 * get_rps_cpu is called from netif_receive_skb and returns the target
3042 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003043 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003044 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003045static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3046 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003047{
Tom Herbert0a9627f2010-03-16 08:03:29 +00003048 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003049 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07003050 struct rps_dev_flow_table *flow_table;
3051 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003052 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07003053 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003054
Tom Herbert0a9627f2010-03-16 08:03:29 +00003055 if (skb_rx_queue_recorded(skb)) {
3056 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003057 if (unlikely(index >= dev->real_num_rx_queues)) {
3058 WARN_ONCE(dev->real_num_rx_queues > 1,
3059 "%s received packet on queue %u, but number "
3060 "of RX queues is %u\n",
3061 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003062 goto done;
3063 }
3064 rxqueue = dev->_rx + index;
3065 } else
3066 rxqueue = dev->_rx;
3067
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003068 map = rcu_dereference(rxqueue->rps_map);
3069 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08003070 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003071 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003072 tcpu = map->cpus[0];
3073 if (cpu_online(tcpu))
3074 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003075 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003076 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003077 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003078 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003079 }
3080
Changli Gao2d47b452010-08-17 19:00:56 +00003081 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003082 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003083 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003084
Tom Herbertfec5e652010-04-16 16:01:27 -07003085 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3086 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3087 if (flow_table && sock_flow_table) {
3088 u16 next_cpu;
3089 struct rps_dev_flow *rflow;
3090
3091 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3092 tcpu = rflow->cpu;
3093
3094 next_cpu = sock_flow_table->ents[skb->rxhash &
3095 sock_flow_table->mask];
3096
3097 /*
3098 * If the desired CPU (where last recvmsg was done) is
3099 * different from current CPU (one in the rx-queue flow
3100 * table entry), switch if one of the following holds:
3101 * - Current CPU is unset (equal to RPS_NO_CPU).
3102 * - Current CPU is offline.
3103 * - The current CPU's queue tail has advanced beyond the
3104 * last packet that was enqueued using this table entry.
3105 * This guarantees that all previous packets for the flow
3106 * have been dequeued, thus preserving in order delivery.
3107 */
3108 if (unlikely(tcpu != next_cpu) &&
3109 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3110 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003111 rflow->last_qtail)) >= 0)) {
3112 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003113 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003114 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003115
Tom Herbertfec5e652010-04-16 16:01:27 -07003116 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3117 *rflowp = rflow;
3118 cpu = tcpu;
3119 goto done;
3120 }
3121 }
3122
Tom Herbert0a9627f2010-03-16 08:03:29 +00003123 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003124 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003125
3126 if (cpu_online(tcpu)) {
3127 cpu = tcpu;
3128 goto done;
3129 }
3130 }
3131
3132done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003133 return cpu;
3134}
3135
Ben Hutchingsc4454772011-01-19 11:03:53 +00003136#ifdef CONFIG_RFS_ACCEL
3137
3138/**
3139 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3140 * @dev: Device on which the filter was set
3141 * @rxq_index: RX queue index
3142 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3143 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3144 *
3145 * Drivers that implement ndo_rx_flow_steer() should periodically call
3146 * this function for each installed filter and remove the filters for
3147 * which it returns %true.
3148 */
3149bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3150 u32 flow_id, u16 filter_id)
3151{
3152 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3153 struct rps_dev_flow_table *flow_table;
3154 struct rps_dev_flow *rflow;
3155 bool expire = true;
3156 int cpu;
3157
3158 rcu_read_lock();
3159 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3160 if (flow_table && flow_id <= flow_table->mask) {
3161 rflow = &flow_table->flows[flow_id];
3162 cpu = ACCESS_ONCE(rflow->cpu);
3163 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3164 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3165 rflow->last_qtail) <
3166 (int)(10 * flow_table->mask)))
3167 expire = false;
3168 }
3169 rcu_read_unlock();
3170 return expire;
3171}
3172EXPORT_SYMBOL(rps_may_expire_flow);
3173
3174#endif /* CONFIG_RFS_ACCEL */
3175
Tom Herbert0a9627f2010-03-16 08:03:29 +00003176/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003177static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003178{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003179 struct softnet_data *sd = data;
3180
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003181 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003182 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003183}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003184
Tom Herbertfec5e652010-04-16 16:01:27 -07003185#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003186
3187/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003188 * Check if this softnet_data structure is another cpu one
3189 * If yes, queue it to our IPI list and return 1
3190 * If no, return 0
3191 */
3192static int rps_ipi_queued(struct softnet_data *sd)
3193{
3194#ifdef CONFIG_RPS
3195 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3196
3197 if (sd != mysd) {
3198 sd->rps_ipi_next = mysd->rps_ipi_list;
3199 mysd->rps_ipi_list = sd;
3200
3201 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3202 return 1;
3203 }
3204#endif /* CONFIG_RPS */
3205 return 0;
3206}
3207
3208/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003209 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3210 * queue (may be a remote CPU queue).
3211 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003212static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3213 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003214{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003215 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003216 unsigned long flags;
3217
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003218 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003219
3220 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003221
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003222 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003223 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3224 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003225enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003226 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003227 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003228 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003229 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003230 return NET_RX_SUCCESS;
3231 }
3232
Eric Dumazetebda37c22010-05-06 23:51:21 +00003233 /* Schedule NAPI for backlog device
3234 * We can use non atomic operation since we own the queue lock
3235 */
3236 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003237 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003238 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003239 }
3240 goto enqueue;
3241 }
3242
Changli Gaodee42872010-05-02 05:42:16 +00003243 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003244 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003245
Tom Herbert0a9627f2010-03-16 08:03:29 +00003246 local_irq_restore(flags);
3247
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003248 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003249 kfree_skb(skb);
3250 return NET_RX_DROP;
3251}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253/**
3254 * netif_rx - post buffer to the network code
3255 * @skb: buffer to post
3256 *
3257 * This function receives a packet from a device driver and queues it for
3258 * the upper (protocol) levels to process. It always succeeds. The buffer
3259 * may be dropped during processing for congestion control or by the
3260 * protocol layers.
3261 *
3262 * return values:
3263 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 * NET_RX_DROP (packet was dropped)
3265 *
3266 */
3267
3268int netif_rx(struct sk_buff *skb)
3269{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003270 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271
3272 /* if netpoll wants it, pretend we never saw it */
3273 if (netpoll_rx(skb))
3274 return NET_RX_DROP;
3275
Eric Dumazet588f0332011-11-15 04:12:55 +00003276 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277
Koki Sanagicf66ba52010-08-23 18:45:02 +09003278 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003279#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003280 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003281 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003282 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
Changli Gaocece1942010-08-07 20:35:43 -07003284 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003285 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003286
3287 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003288 if (cpu < 0)
3289 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003290
3291 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3292
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003293 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003294 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003295 } else
3296#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003297 {
3298 unsigned int qtail;
3299 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3300 put_cpu();
3301 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003302 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003304EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305
3306int netif_rx_ni(struct sk_buff *skb)
3307{
3308 int err;
3309
3310 preempt_disable();
3311 err = netif_rx(skb);
3312 if (local_softirq_pending())
3313 do_softirq();
3314 preempt_enable();
3315
3316 return err;
3317}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318EXPORT_SYMBOL(netif_rx_ni);
3319
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320static void net_tx_action(struct softirq_action *h)
3321{
3322 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3323
3324 if (sd->completion_queue) {
3325 struct sk_buff *clist;
3326
3327 local_irq_disable();
3328 clist = sd->completion_queue;
3329 sd->completion_queue = NULL;
3330 local_irq_enable();
3331
3332 while (clist) {
3333 struct sk_buff *skb = clist;
3334 clist = clist->next;
3335
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003336 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003337 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338 __kfree_skb(skb);
3339 }
3340 }
3341
3342 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003343 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344
3345 local_irq_disable();
3346 head = sd->output_queue;
3347 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003348 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 local_irq_enable();
3350
3351 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003352 struct Qdisc *q = head;
3353 spinlock_t *root_lock;
3354
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 head = head->next_sched;
3356
David S. Miller5fb66222008-08-02 20:02:43 -07003357 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003358 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003359 smp_mb__before_clear_bit();
3360 clear_bit(__QDISC_STATE_SCHED,
3361 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003362 qdisc_run(q);
3363 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003365 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003366 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003367 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003368 } else {
3369 smp_mb__before_clear_bit();
3370 clear_bit(__QDISC_STATE_SCHED,
3371 &q->state);
3372 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 }
3374 }
3375 }
3376}
3377
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003378#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3379 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003380/* This hook is defined here for ATM LANE */
3381int (*br_fdb_test_addr_hook)(struct net_device *dev,
3382 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003383EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003384#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386#ifdef CONFIG_NET_CLS_ACT
3387/* TODO: Maybe we should just force sch_ingress to be compiled in
3388 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
3389 * a compare and 2 stores extra right now if we dont have it on
3390 * but have CONFIG_NET_CLS_ACT
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003391 * NOTE: This doesn't stop any functionality; if you dont have
3392 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 *
3394 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003395static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003398 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003399 int result = TC_ACT_OK;
3400 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003401
Stephen Hemmingerde384832010-08-01 00:33:23 -07003402 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003403 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3404 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003405 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 }
3407
Herbert Xuf697c3e2007-10-14 00:38:47 -07003408 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3409 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3410
David S. Miller83874002008-07-17 00:53:03 -07003411 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003412 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003413 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003414 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3415 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003416 spin_unlock(qdisc_lock(q));
3417 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003418
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 return result;
3420}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003421
3422static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3423 struct packet_type **pt_prev,
3424 int *ret, struct net_device *orig_dev)
3425{
Eric Dumazet24824a02010-10-02 06:11:55 +00003426 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3427
3428 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003429 goto out;
3430
3431 if (*pt_prev) {
3432 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3433 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003434 }
3435
Eric Dumazet24824a02010-10-02 06:11:55 +00003436 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003437 case TC_ACT_SHOT:
3438 case TC_ACT_STOLEN:
3439 kfree_skb(skb);
3440 return NULL;
3441 }
3442
3443out:
3444 skb->tc_verd = 0;
3445 return skb;
3446}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447#endif
3448
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003449/**
3450 * netdev_rx_handler_register - register receive handler
3451 * @dev: device to register a handler for
3452 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003453 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003454 *
3455 * Register a receive hander for a device. This handler will then be
3456 * called from __netif_receive_skb. A negative errno code is returned
3457 * on a failure.
3458 *
3459 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003460 *
3461 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003462 */
3463int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003464 rx_handler_func_t *rx_handler,
3465 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003466{
3467 ASSERT_RTNL();
3468
3469 if (dev->rx_handler)
3470 return -EBUSY;
3471
Jiri Pirko93e2c322010-06-10 03:34:59 +00003472 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003473 rcu_assign_pointer(dev->rx_handler, rx_handler);
3474
3475 return 0;
3476}
3477EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3478
3479/**
3480 * netdev_rx_handler_unregister - unregister receive handler
3481 * @dev: device to unregister a handler from
3482 *
3483 * Unregister a receive hander from a device.
3484 *
3485 * The caller must hold the rtnl_mutex.
3486 */
3487void netdev_rx_handler_unregister(struct net_device *dev)
3488{
3489
3490 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003491 RCU_INIT_POINTER(dev->rx_handler, NULL);
3492 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003493}
3494EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3495
Mel Gormanb4b9e352012-07-31 16:44:26 -07003496/*
3497 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3498 * the special handling of PFMEMALLOC skbs.
3499 */
3500static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3501{
3502 switch (skb->protocol) {
3503 case __constant_htons(ETH_P_ARP):
3504 case __constant_htons(ETH_P_IP):
3505 case __constant_htons(ETH_P_IPV6):
3506 case __constant_htons(ETH_P_8021Q):
3507 return true;
3508 default:
3509 return false;
3510 }
3511}
3512
Eric Dumazet10f744d2010-03-28 23:07:20 -07003513static int __netif_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514{
3515 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003516 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003517 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003518 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003519 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003521 __be16 type;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003522 unsigned long pflags = current->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523
Eric Dumazet588f0332011-11-15 04:12:55 +00003524 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003525
Koki Sanagicf66ba52010-08-23 18:45:02 +09003526 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003527
Mel Gormanb4b9e352012-07-31 16:44:26 -07003528 /*
3529 * PFMEMALLOC skbs are special, they should
3530 * - be delivered to SOCK_MEMALLOC sockets only
3531 * - stay away from userspace
3532 * - have bounded memory usage
3533 *
3534 * Use PF_MEMALLOC as this saves us from propagating the allocation
3535 * context down to all allocation sites.
3536 */
3537 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3538 current->flags |= PF_MEMALLOC;
3539
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003541 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003542 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003544 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003545
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003546 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003547 if (!skb_transport_header_was_set(skb))
3548 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003549 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550
3551 pt_prev = NULL;
3552
3553 rcu_read_lock();
3554
David S. Miller63d8ea72011-02-28 10:48:59 -08003555another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003556 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003557
3558 __this_cpu_inc(softnet_data.processed);
3559
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003560 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3561 skb = vlan_untag(skb);
3562 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003563 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003564 }
3565
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566#ifdef CONFIG_NET_CLS_ACT
3567 if (skb->tc_verd & TC_NCLS) {
3568 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3569 goto ncls;
3570 }
3571#endif
3572
Mel Gormanb4b9e352012-07-31 16:44:26 -07003573 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3574 goto skip_taps;
3575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003577 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003578 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003579 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 pt_prev = ptype;
3581 }
3582 }
3583
Mel Gormanb4b9e352012-07-31 16:44:26 -07003584skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003586 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3587 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003588 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589ncls:
3590#endif
3591
Mel Gormanb4b9e352012-07-31 16:44:26 -07003592 if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3593 && !skb_pfmemalloc_protocol(skb))
3594 goto drop;
3595
John Fastabend24257172011-10-10 09:16:41 +00003596 if (vlan_tx_tag_present(skb)) {
3597 if (pt_prev) {
3598 ret = deliver_skb(skb, pt_prev, orig_dev);
3599 pt_prev = NULL;
3600 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003601 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003602 goto another_round;
3603 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003604 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003605 }
3606
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003607 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003608 if (rx_handler) {
3609 if (pt_prev) {
3610 ret = deliver_skb(skb, pt_prev, orig_dev);
3611 pt_prev = NULL;
3612 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003613 switch (rx_handler(&skb)) {
3614 case RX_HANDLER_CONSUMED:
Mel Gormanb4b9e352012-07-31 16:44:26 -07003615 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003616 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003617 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003618 case RX_HANDLER_EXACT:
3619 deliver_exact = true;
3620 case RX_HANDLER_PASS:
3621 break;
3622 default:
3623 BUG();
3624 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003625 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003627 if (vlan_tx_nonzero_tag_present(skb))
3628 skb->pkt_type = PACKET_OTHERHOST;
3629
David S. Miller63d8ea72011-02-28 10:48:59 -08003630 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003631 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003632
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003634 list_for_each_entry_rcu(ptype,
3635 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003636 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003637 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3638 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003639 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003640 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 pt_prev = ptype;
3642 }
3643 }
3644
3645 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003646 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003647 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003648 else
3649 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003651drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003652 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 kfree_skb(skb);
3654 /* Jamal, now you will not able to escape explaining
3655 * me how you were going to use this. :-)
3656 */
3657 ret = NET_RX_DROP;
3658 }
3659
Mel Gormanb4b9e352012-07-31 16:44:26 -07003660unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003662out:
3663 tsk_restore_flags(current, pflags, PF_MEMALLOC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 return ret;
3665}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003666
3667/**
3668 * netif_receive_skb - process receive buffer from network
3669 * @skb: buffer to process
3670 *
3671 * netif_receive_skb() is the main receive data processing function.
3672 * It always succeeds. The buffer may be dropped during processing
3673 * for congestion control or by the protocol layers.
3674 *
3675 * This function may only be called from softirq context and interrupts
3676 * should be enabled.
3677 *
3678 * Return values (usually ignored):
3679 * NET_RX_SUCCESS: no congestion
3680 * NET_RX_DROP: packet was dropped
3681 */
3682int netif_receive_skb(struct sk_buff *skb)
3683{
Eric Dumazet588f0332011-11-15 04:12:55 +00003684 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003685
Richard Cochranc1f19b52010-07-17 08:49:36 +00003686 if (skb_defer_rx_timestamp(skb))
3687 return NET_RX_SUCCESS;
3688
Eric Dumazetdf334542010-03-24 19:13:54 +00003689#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003690 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003691 struct rps_dev_flow voidflow, *rflow = &voidflow;
3692 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003693
Eric Dumazet3b098e22010-05-15 23:57:10 -07003694 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003695
Eric Dumazet3b098e22010-05-15 23:57:10 -07003696 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003697
Eric Dumazet3b098e22010-05-15 23:57:10 -07003698 if (cpu >= 0) {
3699 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3700 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003701 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003702 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003703 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003704 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003705#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003706 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003707}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003708EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
Eric Dumazet88751272010-04-19 05:07:33 +00003710/* Network device is going away, flush any packets still pending
3711 * Called with irqs disabled.
3712 */
Changli Gao152102c2010-03-30 20:16:22 +00003713static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003714{
Changli Gao152102c2010-03-30 20:16:22 +00003715 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003716 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003717 struct sk_buff *skb, *tmp;
3718
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003719 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003720 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003721 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003722 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003723 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003724 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003725 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003726 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003727 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003728
3729 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3730 if (skb->dev == dev) {
3731 __skb_unlink(skb, &sd->process_queue);
3732 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003733 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003734 }
3735 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003736}
3737
Herbert Xud565b0a2008-12-15 23:38:52 -08003738static int napi_gro_complete(struct sk_buff *skb)
3739{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003740 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003741 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003742 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003743 int err = -ENOENT;
3744
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003745 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3746
Herbert Xufc59f9a2009-04-14 15:11:06 -07003747 if (NAPI_GRO_CB(skb)->count == 1) {
3748 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003749 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003750 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003751
3752 rcu_read_lock();
3753 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003754 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003755 continue;
3756
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003757 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003758 break;
3759 }
3760 rcu_read_unlock();
3761
3762 if (err) {
3763 WARN_ON(&ptype->list == head);
3764 kfree_skb(skb);
3765 return NET_RX_SUCCESS;
3766 }
3767
3768out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003769 return netif_receive_skb(skb);
3770}
3771
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003772/* napi->gro_list contains packets ordered by age.
3773 * youngest packets at the head of it.
3774 * Complete skbs in reverse order to reduce latencies.
3775 */
3776void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003777{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003778 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003779
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003780 /* scan list and build reverse chain */
3781 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3782 skb->prev = prev;
3783 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003784 }
3785
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003786 for (skb = prev; skb; skb = prev) {
3787 skb->next = NULL;
3788
3789 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3790 return;
3791
3792 prev = skb->prev;
3793 napi_gro_complete(skb);
3794 napi->gro_count--;
3795 }
3796
Herbert Xud565b0a2008-12-15 23:38:52 -08003797 napi->gro_list = NULL;
3798}
Eric Dumazet86cac582010-08-31 18:25:32 +00003799EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003800
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003801static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3802{
3803 struct sk_buff *p;
3804 unsigned int maclen = skb->dev->hard_header_len;
3805
3806 for (p = napi->gro_list; p; p = p->next) {
3807 unsigned long diffs;
3808
3809 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3810 diffs |= p->vlan_tci ^ skb->vlan_tci;
3811 if (maclen == ETH_HLEN)
3812 diffs |= compare_ether_header(skb_mac_header(p),
3813 skb_gro_mac_header(skb));
3814 else if (!diffs)
3815 diffs = memcmp(skb_mac_header(p),
3816 skb_gro_mac_header(skb),
3817 maclen);
3818 NAPI_GRO_CB(p)->same_flow = !diffs;
3819 NAPI_GRO_CB(p)->flush = 0;
3820 }
3821}
3822
Rami Rosenbb728822012-11-28 21:55:25 +00003823static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003824{
3825 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003826 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003827 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003828 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003829 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08003830 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003831 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003832
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003833 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003834 goto normal;
3835
David S. Miller21dc3302010-08-23 00:13:46 -07003836 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003837 goto normal;
3838
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003839 gro_list_prepare(napi, skb);
3840
Herbert Xud565b0a2008-12-15 23:38:52 -08003841 rcu_read_lock();
3842 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003843 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003844 continue;
3845
Herbert Xu86911732009-01-29 14:19:50 +00003846 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08003847 mac_len = skb->network_header - skb->mac_header;
3848 skb->mac_len = mac_len;
3849 NAPI_GRO_CB(skb)->same_flow = 0;
3850 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003851 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003852
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003853 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003854 break;
3855 }
3856 rcu_read_unlock();
3857
3858 if (&ptype->list == head)
3859 goto normal;
3860
Herbert Xu0da2afd52008-12-26 14:57:42 -08003861 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003862 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003863
Herbert Xud565b0a2008-12-15 23:38:52 -08003864 if (pp) {
3865 struct sk_buff *nskb = *pp;
3866
3867 *pp = nskb->next;
3868 nskb->next = NULL;
3869 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003870 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003871 }
3872
Herbert Xu0da2afd52008-12-26 14:57:42 -08003873 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003874 goto ok;
3875
Herbert Xu4ae55442009-02-08 18:00:36 +00003876 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003877 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003878
Herbert Xu4ae55442009-02-08 18:00:36 +00003879 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003880 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003881 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003882 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003883 skb->next = napi->gro_list;
3884 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003885 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003886
Herbert Xuad0f9902009-02-01 01:24:55 -08003887pull:
Herbert Xucb189782009-05-26 18:50:31 +00003888 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3889 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3890
3891 BUG_ON(skb->end - skb->tail < grow);
3892
3893 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3894
3895 skb->tail += grow;
3896 skb->data_len -= grow;
3897
3898 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003899 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003900
Eric Dumazet9e903e02011-10-18 21:00:24 +00003901 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003902 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003903 memmove(skb_shinfo(skb)->frags,
3904 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003905 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003906 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003907 }
3908
Herbert Xud565b0a2008-12-15 23:38:52 -08003909ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003910 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003911
3912normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003913 ret = GRO_NORMAL;
3914 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003915}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003916
Herbert Xu96e93ea2009-01-06 10:49:34 -08003917
Rami Rosenbb728822012-11-28 21:55:25 +00003918static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003919{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003920 switch (ret) {
3921 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003922 if (netif_receive_skb(skb))
3923 ret = GRO_DROP;
3924 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003925
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003926 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003927 kfree_skb(skb);
3928 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003929
Eric Dumazetdaa86542012-04-19 07:07:40 +00003930 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003931 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3932 kmem_cache_free(skbuff_head_cache, skb);
3933 else
3934 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003935 break;
3936
Ben Hutchings5b252f02009-10-29 07:17:09 +00003937 case GRO_HELD:
3938 case GRO_MERGED:
3939 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003940 }
3941
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003942 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003943}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003944
Eric Dumazetca07e432012-10-06 22:28:06 +00003945static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003946{
Eric Dumazetca07e432012-10-06 22:28:06 +00003947 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3948 const skb_frag_t *frag0 = &pinfo->frags[0];
3949
Herbert Xu78a478d2009-05-26 18:50:21 +00003950 NAPI_GRO_CB(skb)->data_offset = 0;
3951 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003952 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003953
Herbert Xu78d3fd02009-05-26 18:50:23 +00003954 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003955 pinfo->nr_frags &&
3956 !PageHighMem(skb_frag_page(frag0))) {
3957 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3958 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003959 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003960}
Herbert Xu78a478d2009-05-26 18:50:21 +00003961
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003962gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003963{
Herbert Xu86911732009-01-29 14:19:50 +00003964 skb_gro_reset_offset(skb);
3965
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003966 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003967}
3968EXPORT_SYMBOL(napi_gro_receive);
3969
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003970static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003971{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003972 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003973 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3974 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003975 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003976 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003977 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003978
3979 napi->skb = skb;
3980}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003981
Herbert Xu76620aa2009-04-16 02:02:07 -07003982struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003983{
Herbert Xu5d38a072009-01-04 16:13:40 -08003984 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003985
3986 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003987 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3988 if (skb)
3989 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003990 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003991 return skb;
3992}
Herbert Xu76620aa2009-04-16 02:02:07 -07003993EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003994
Rami Rosenbb728822012-11-28 21:55:25 +00003995static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003996 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003997{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003998 switch (ret) {
3999 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00004000 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00004001 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00004002
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004003 if (ret == GRO_HELD)
4004 skb_gro_pull(skb, -ETH_HLEN);
4005 else if (netif_receive_skb(skb))
4006 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004007 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004008
4009 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004010 case GRO_MERGED_FREE:
4011 napi_reuse_skb(napi, skb);
4012 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004013
4014 case GRO_MERGED:
4015 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004016 }
4017
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004018 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004019}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004020
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004021static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004022{
Herbert Xu76620aa2009-04-16 02:02:07 -07004023 struct sk_buff *skb = napi->skb;
4024 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00004025 unsigned int hlen;
4026 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07004027
4028 napi->skb = NULL;
4029
4030 skb_reset_mac_header(skb);
4031 skb_gro_reset_offset(skb);
4032
Herbert Xua5b1cf22009-05-26 18:50:28 +00004033 off = skb_gro_offset(skb);
4034 hlen = off + sizeof(*eth);
4035 eth = skb_gro_header_fast(skb, off);
4036 if (skb_gro_header_hard(skb, hlen)) {
4037 eth = skb_gro_header_slow(skb, hlen, off);
4038 if (unlikely(!eth)) {
4039 napi_reuse_skb(napi, skb);
4040 skb = NULL;
4041 goto out;
4042 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004043 }
4044
4045 skb_gro_pull(skb, sizeof(*eth));
4046
4047 /*
4048 * This works because the only protocols we care about don't require
4049 * special handling. We'll fix it up properly at the end.
4050 */
4051 skb->protocol = eth->h_proto;
4052
4053out:
4054 return skb;
4055}
Herbert Xu76620aa2009-04-16 02:02:07 -07004056
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004057gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004058{
4059 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004060
4061 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004062 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004063
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004064 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004065}
4066EXPORT_SYMBOL(napi_gro_frags);
4067
Eric Dumazete326bed2010-04-22 00:22:45 -07004068/*
4069 * net_rps_action sends any pending IPI's for rps.
4070 * Note: called with local irq disabled, but exits with local irq enabled.
4071 */
4072static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4073{
4074#ifdef CONFIG_RPS
4075 struct softnet_data *remsd = sd->rps_ipi_list;
4076
4077 if (remsd) {
4078 sd->rps_ipi_list = NULL;
4079
4080 local_irq_enable();
4081
4082 /* Send pending IPI's to kick RPS processing on remote cpus. */
4083 while (remsd) {
4084 struct softnet_data *next = remsd->rps_ipi_next;
4085
4086 if (cpu_online(remsd->cpu))
4087 __smp_call_function_single(remsd->cpu,
4088 &remsd->csd, 0);
4089 remsd = next;
4090 }
4091 } else
4092#endif
4093 local_irq_enable();
4094}
4095
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004096static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097{
4098 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004099 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
Eric Dumazete326bed2010-04-22 00:22:45 -07004101#ifdef CONFIG_RPS
4102 /* Check if we have pending ipi, its better to send them now,
4103 * not waiting net_rx_action() end.
4104 */
4105 if (sd->rps_ipi_list) {
4106 local_irq_disable();
4107 net_rps_action_and_irq_enable(sd);
4108 }
4109#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004110 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004111 local_irq_disable();
4112 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004114 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115
Changli Gao6e7676c2010-04-27 15:07:33 -07004116 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004117 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004118 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004119 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004120 input_queue_head_incr(sd);
4121 if (++work >= quota) {
4122 local_irq_enable();
4123 return work;
4124 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004125 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126
Changli Gao6e7676c2010-04-27 15:07:33 -07004127 rps_lock(sd);
4128 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004129 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004130 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4131 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004132
Changli Gao6e7676c2010-04-27 15:07:33 -07004133 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004134 /*
4135 * Inline a custom version of __napi_complete().
4136 * only current cpu owns and manipulates this napi,
4137 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4138 * we can use a plain write instead of clear_bit(),
4139 * and we dont need an smp_mb() memory barrier.
4140 */
4141 list_del(&napi->poll_list);
4142 napi->state = 0;
4143
Changli Gao6e7676c2010-04-27 15:07:33 -07004144 quota = work + qlen;
4145 }
4146 rps_unlock(sd);
4147 }
4148 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004150 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151}
4152
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004153/**
4154 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004155 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004156 *
4157 * The entry's receive function will be scheduled to run
4158 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004159void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004160{
4161 unsigned long flags;
4162
4163 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004164 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004165 local_irq_restore(flags);
4166}
4167EXPORT_SYMBOL(__napi_schedule);
4168
Herbert Xud565b0a2008-12-15 23:38:52 -08004169void __napi_complete(struct napi_struct *n)
4170{
4171 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4172 BUG_ON(n->gro_list);
4173
4174 list_del(&n->poll_list);
4175 smp_mb__before_clear_bit();
4176 clear_bit(NAPI_STATE_SCHED, &n->state);
4177}
4178EXPORT_SYMBOL(__napi_complete);
4179
4180void napi_complete(struct napi_struct *n)
4181{
4182 unsigned long flags;
4183
4184 /*
4185 * don't let napi dequeue from the cpu poll list
4186 * just in case its running on a different cpu
4187 */
4188 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4189 return;
4190
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004191 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004192 local_irq_save(flags);
4193 __napi_complete(n);
4194 local_irq_restore(flags);
4195}
4196EXPORT_SYMBOL(napi_complete);
4197
4198void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4199 int (*poll)(struct napi_struct *, int), int weight)
4200{
4201 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004202 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004203 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004204 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004205 napi->poll = poll;
4206 napi->weight = weight;
4207 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004208 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004209#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004210 spin_lock_init(&napi->poll_lock);
4211 napi->poll_owner = -1;
4212#endif
4213 set_bit(NAPI_STATE_SCHED, &napi->state);
4214}
4215EXPORT_SYMBOL(netif_napi_add);
4216
4217void netif_napi_del(struct napi_struct *napi)
4218{
4219 struct sk_buff *skb, *next;
4220
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004221 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004222 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004223
4224 for (skb = napi->gro_list; skb; skb = next) {
4225 next = skb->next;
4226 skb->next = NULL;
4227 kfree_skb(skb);
4228 }
4229
4230 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004231 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004232}
4233EXPORT_SYMBOL(netif_napi_del);
4234
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235static void net_rx_action(struct softirq_action *h)
4236{
Eric Dumazete326bed2010-04-22 00:22:45 -07004237 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004238 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004239 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004240 void *have;
4241
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242 local_irq_disable();
4243
Eric Dumazete326bed2010-04-22 00:22:45 -07004244 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004245 struct napi_struct *n;
4246 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004248 /* If softirq window is exhuasted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004249 * Allow this to run for 2 jiffies since which will allow
4250 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004251 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004252 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253 goto softnet_break;
4254
4255 local_irq_enable();
4256
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004257 /* Even though interrupts have been re-enabled, this
4258 * access is safe because interrupts can only add new
4259 * entries to the tail of this list, and only ->poll()
4260 * calls can remove this head entry from the list.
4261 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004262 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004264 have = netpoll_poll_lock(n);
4265
4266 weight = n->weight;
4267
David S. Miller0a7606c2007-10-29 21:28:47 -07004268 /* This NAPI_STATE_SCHED test is for avoiding a race
4269 * with netpoll's poll_napi(). Only the entity which
4270 * obtains the lock and sees NAPI_STATE_SCHED set will
4271 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004272 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004273 */
4274 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004275 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004276 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004277 trace_napi_poll(n);
4278 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004279
4280 WARN_ON_ONCE(work > weight);
4281
4282 budget -= work;
4283
4284 local_irq_disable();
4285
4286 /* Drivers must not modify the NAPI state if they
4287 * consume the entire weight. In such cases this code
4288 * still "owns" the NAPI instance and therefore can
4289 * move the instance around on the list at-will.
4290 */
David S. Millerfed17f32008-01-07 21:00:40 -08004291 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004292 if (unlikely(napi_disable_pending(n))) {
4293 local_irq_enable();
4294 napi_complete(n);
4295 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004296 } else {
4297 if (n->gro_list) {
4298 /* flush too old packets
4299 * If HZ < 1000, flush all packets.
4300 */
4301 local_irq_enable();
4302 napi_gro_flush(n, HZ >= 1000);
4303 local_irq_disable();
4304 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004305 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004306 }
David S. Millerfed17f32008-01-07 21:00:40 -08004307 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004308
4309 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310 }
4311out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004312 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004313
Chris Leechdb217332006-06-17 21:24:58 -07004314#ifdef CONFIG_NET_DMA
4315 /*
4316 * There may not be any more sk_buffs coming right now, so push
4317 * any pending DMA copies to hardware
4318 */
Dan Williams2ba05622009-01-06 11:38:14 -07004319 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004320#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004321
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 return;
4323
4324softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004325 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4327 goto out;
4328}
4329
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004330static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331
4332/**
4333 * register_gifconf - register a SIOCGIF handler
4334 * @family: Address family
4335 * @gifconf: Function handler
4336 *
4337 * Register protocol dependent address dumping routines. The handler
4338 * that is passed must not be freed or reused until it has been replaced
4339 * by another handler.
4340 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004341int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342{
4343 if (family >= NPROTO)
4344 return -EINVAL;
4345 gifconf_list[family] = gifconf;
4346 return 0;
4347}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004348EXPORT_SYMBOL(register_gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349
4350
4351/*
4352 * Map an interface index to its name (SIOCGIFNAME)
4353 */
4354
4355/*
4356 * We need this ioctl for efficient implementation of the
4357 * if_indextoname() function required by the IPv6 API. Without
4358 * it, we would have to search all the interfaces to find a
4359 * match. --pb
4360 */
4361
Eric W. Biederman881d9662007-09-17 11:56:21 -07004362static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363{
4364 struct net_device *dev;
4365 struct ifreq ifr;
Brian Haleyc91f6df2012-11-26 05:21:08 +00004366 unsigned seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367
4368 /*
4369 * Fetch the caller's info block.
4370 */
4371
4372 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4373 return -EFAULT;
4374
Brian Haleyc91f6df2012-11-26 05:21:08 +00004375retry:
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004376 seq = read_seqcount_begin(&devnet_rename_seq);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004377 rcu_read_lock();
4378 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004380 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 return -ENODEV;
4382 }
4383
4384 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00004385 rcu_read_unlock();
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00004386 if (read_seqcount_retry(&devnet_rename_seq, seq))
Brian Haleyc91f6df2012-11-26 05:21:08 +00004387 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
4389 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4390 return -EFAULT;
4391 return 0;
4392}
4393
4394/*
4395 * Perform a SIOCGIFCONF call. This structure will change
4396 * size eventually, and there is nothing I can do about it.
4397 * Thus we will need a 'compatibility mode'.
4398 */
4399
Eric W. Biederman881d9662007-09-17 11:56:21 -07004400static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401{
4402 struct ifconf ifc;
4403 struct net_device *dev;
4404 char __user *pos;
4405 int len;
4406 int total;
4407 int i;
4408
4409 /*
4410 * Fetch the caller's info block.
4411 */
4412
4413 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4414 return -EFAULT;
4415
4416 pos = ifc.ifc_buf;
4417 len = ifc.ifc_len;
4418
4419 /*
4420 * Loop over the interfaces, and write an info block for each.
4421 */
4422
4423 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004424 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 for (i = 0; i < NPROTO; i++) {
4426 if (gifconf_list[i]) {
4427 int done;
4428 if (!pos)
4429 done = gifconf_list[i](dev, NULL, 0);
4430 else
4431 done = gifconf_list[i](dev, pos + total,
4432 len - total);
4433 if (done < 0)
4434 return -EFAULT;
4435 total += done;
4436 }
4437 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439
4440 /*
4441 * All done. Write the updated control block back to the caller.
4442 */
4443 ifc.ifc_len = total;
4444
4445 /*
4446 * Both BSD and Solaris return 0 here, so we do too.
4447 */
4448 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4449}
4450
4451#ifdef CONFIG_PROC_FS
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004452
Eric Dumazet2def16a2012-04-02 22:33:02 +00004453#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004454
4455#define get_bucket(x) ((x) >> BUCKET_SPACE)
4456#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4457#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4458
Eric Dumazet2def16a2012-04-02 22:33:02 +00004459static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004460{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004461 struct net *net = seq_file_net(seq);
4462 struct net_device *dev;
4463 struct hlist_node *p;
4464 struct hlist_head *h;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004465 unsigned int count = 0, offset = get_offset(*pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004466
Eric Dumazet2def16a2012-04-02 22:33:02 +00004467 h = &net->dev_name_head[get_bucket(*pos)];
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004468 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004469 if (++count == offset)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004470 return dev;
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004471 }
4472
4473 return NULL;
4474}
4475
Eric Dumazet2def16a2012-04-02 22:33:02 +00004476static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004477{
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004478 struct net_device *dev;
4479 unsigned int bucket;
4480
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004481 do {
Eric Dumazet2def16a2012-04-02 22:33:02 +00004482 dev = dev_from_same_bucket(seq, pos);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004483 if (dev)
4484 return dev;
4485
Eric Dumazet2def16a2012-04-02 22:33:02 +00004486 bucket = get_bucket(*pos) + 1;
4487 *pos = set_bucket_offset(bucket, 1);
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004488 } while (bucket < NETDEV_HASHENTRIES);
4489
4490 return NULL;
4491}
4492
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493/*
4494 * This is invoked by the /proc filesystem handler to display a device
4495 * in detail.
4496 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004498 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004500 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07004501 if (!*pos)
4502 return SEQ_START_TOKEN;
4503
Eric Dumazet2def16a2012-04-02 22:33:02 +00004504 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
Mihai Maruseacf04565d2011-10-20 20:45:10 +00004505 return NULL;
Pavel Emelianov7562f872007-05-03 15:13:45 -07004506
Eric Dumazet2def16a2012-04-02 22:33:02 +00004507 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508}
4509
4510void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4511{
4512 ++*pos;
Eric Dumazet2def16a2012-04-02 22:33:02 +00004513 return dev_from_bucket(seq, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514}
4515
4516void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004517 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08004519 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520}
4521
4522static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4523{
Eric Dumazet28172732010-07-07 14:58:56 -07004524 struct rtnl_link_stats64 temp;
4525 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00004527 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4528 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
Rusty Russell5a1b5892007-04-28 21:04:03 -07004529 dev->name, stats->rx_bytes, stats->rx_packets,
4530 stats->rx_errors,
4531 stats->rx_dropped + stats->rx_missed_errors,
4532 stats->rx_fifo_errors,
4533 stats->rx_length_errors + stats->rx_over_errors +
4534 stats->rx_crc_errors + stats->rx_frame_errors,
4535 stats->rx_compressed, stats->multicast,
4536 stats->tx_bytes, stats->tx_packets,
4537 stats->tx_errors, stats->tx_dropped,
4538 stats->tx_fifo_errors, stats->collisions,
4539 stats->tx_carrier_errors +
4540 stats->tx_aborted_errors +
4541 stats->tx_window_errors +
4542 stats->tx_heartbeat_errors,
4543 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544}
4545
4546/*
4547 * Called from the PROCfs module. This now uses the new arbitrary sized
4548 * /proc/net interface to create /proc/net/dev
4549 */
4550static int dev_seq_show(struct seq_file *seq, void *v)
4551{
4552 if (v == SEQ_START_TOKEN)
4553 seq_puts(seq, "Inter-| Receive "
4554 " | Transmit\n"
4555 " face |bytes packets errs drop fifo frame "
4556 "compressed multicast|bytes packets errs "
4557 "drop fifo colls carrier compressed\n");
4558 else
4559 dev_seq_printf_stats(seq, v);
4560 return 0;
4561}
4562
Changli Gaodee42872010-05-02 05:42:16 +00004563static struct softnet_data *softnet_get_online(loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564{
Changli Gaodee42872010-05-02 05:42:16 +00004565 struct softnet_data *sd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004567 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004568 if (cpu_online(*pos)) {
Changli Gaodee42872010-05-02 05:42:16 +00004569 sd = &per_cpu(softnet_data, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 break;
4571 } else
4572 ++*pos;
Changli Gaodee42872010-05-02 05:42:16 +00004573 return sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574}
4575
4576static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4577{
4578 return softnet_get_online(pos);
4579}
4580
4581static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4582{
4583 ++*pos;
4584 return softnet_get_online(pos);
4585}
4586
4587static void softnet_seq_stop(struct seq_file *seq, void *v)
4588{
4589}
4590
4591static int softnet_seq_show(struct seq_file *seq, void *v)
4592{
Changli Gaodee42872010-05-02 05:42:16 +00004593 struct softnet_data *sd = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594
Tom Herbert0a9627f2010-03-16 08:03:29 +00004595 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Changli Gaodee42872010-05-02 05:42:16 +00004596 sd->processed, sd->dropped, sd->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07004597 0, 0, 0, 0, /* was fastroute */
Changli Gaodee42872010-05-02 05:42:16 +00004598 sd->cpu_collision, sd->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599 return 0;
4600}
4601
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004602static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 .start = dev_seq_start,
4604 .next = dev_seq_next,
4605 .stop = dev_seq_stop,
4606 .show = dev_seq_show,
4607};
4608
4609static int dev_seq_open(struct inode *inode, struct file *file)
4610{
Denis V. Luneve372c412007-11-19 22:31:54 -08004611 return seq_open_net(inode, file, &dev_seq_ops,
Eric Dumazet2def16a2012-04-02 22:33:02 +00004612 sizeof(struct seq_net_private));
Anton Blanchard5cac98d2011-11-27 21:14:46 +00004613}
4614
Arjan van de Ven9a321442007-02-12 00:55:35 -08004615static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 .owner = THIS_MODULE,
4617 .open = dev_seq_open,
4618 .read = seq_read,
4619 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08004620 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621};
4622
Stephen Hemmingerf6908082007-03-12 14:34:29 -07004623static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624 .start = softnet_seq_start,
4625 .next = softnet_seq_next,
4626 .stop = softnet_seq_stop,
4627 .show = softnet_seq_show,
4628};
4629
4630static int softnet_seq_open(struct inode *inode, struct file *file)
4631{
4632 return seq_open(file, &softnet_seq_ops);
4633}
4634
Arjan van de Ven9a321442007-02-12 00:55:35 -08004635static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636 .owner = THIS_MODULE,
4637 .open = softnet_seq_open,
4638 .read = seq_read,
4639 .llseek = seq_lseek,
4640 .release = seq_release,
4641};
4642
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004643static void *ptype_get_idx(loff_t pos)
4644{
4645 struct packet_type *pt = NULL;
4646 loff_t i = 0;
4647 int t;
4648
4649 list_for_each_entry_rcu(pt, &ptype_all, list) {
4650 if (i == pos)
4651 return pt;
4652 ++i;
4653 }
4654
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004655 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004656 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4657 if (i == pos)
4658 return pt;
4659 ++i;
4660 }
4661 }
4662 return NULL;
4663}
4664
4665static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004666 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004667{
4668 rcu_read_lock();
4669 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4670}
4671
4672static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4673{
4674 struct packet_type *pt;
4675 struct list_head *nxt;
4676 int hash;
4677
4678 ++*pos;
4679 if (v == SEQ_START_TOKEN)
4680 return ptype_get_idx(0);
4681
4682 pt = v;
4683 nxt = pt->list.next;
4684 if (pt->type == htons(ETH_P_ALL)) {
4685 if (nxt != &ptype_all)
4686 goto found;
4687 hash = 0;
4688 nxt = ptype_base[0].next;
4689 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004690 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004691
4692 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004693 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004694 return NULL;
4695 nxt = ptype_base[hash].next;
4696 }
4697found:
4698 return list_entry(nxt, struct packet_type, list);
4699}
4700
4701static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08004702 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004703{
4704 rcu_read_unlock();
4705}
4706
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004707static int ptype_seq_show(struct seq_file *seq, void *v)
4708{
4709 struct packet_type *pt = v;
4710
4711 if (v == SEQ_START_TOKEN)
4712 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004713 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004714 if (pt->type == htons(ETH_P_ALL))
4715 seq_puts(seq, "ALL ");
4716 else
4717 seq_printf(seq, "%04x", ntohs(pt->type));
4718
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08004719 seq_printf(seq, " %-8s %pF\n",
4720 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004721 }
4722
4723 return 0;
4724}
4725
4726static const struct seq_operations ptype_seq_ops = {
4727 .start = ptype_seq_start,
4728 .next = ptype_seq_next,
4729 .stop = ptype_seq_stop,
4730 .show = ptype_seq_show,
4731};
4732
4733static int ptype_seq_open(struct inode *inode, struct file *file)
4734{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004735 return seq_open_net(inode, file, &ptype_seq_ops,
4736 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004737}
4738
4739static const struct file_operations ptype_seq_fops = {
4740 .owner = THIS_MODULE,
4741 .open = ptype_seq_open,
4742 .read = seq_read,
4743 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07004744 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004745};
4746
4747
Pavel Emelyanov46650792007-10-08 20:38:39 -07004748static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749{
4750 int rc = -ENOMEM;
4751
Eric W. Biederman881d9662007-09-17 11:56:21 -07004752 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004754 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004756 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004757 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07004758
Eric W. Biederman881d9662007-09-17 11:56:21 -07004759 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004760 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761 rc = 0;
4762out:
4763 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02004764out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004765 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004767 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004769 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770 goto out;
4771}
Eric W. Biederman881d9662007-09-17 11:56:21 -07004772
Pavel Emelyanov46650792007-10-08 20:38:39 -07004773static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004774{
4775 wext_proc_exit(net);
4776
4777 proc_net_remove(net, "ptype");
4778 proc_net_remove(net, "softnet_stat");
4779 proc_net_remove(net, "dev");
4780}
4781
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004782static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004783 .init = dev_proc_net_init,
4784 .exit = dev_proc_net_exit,
4785};
4786
4787static int __init dev_proc_init(void)
4788{
4789 return register_pernet_subsys(&dev_proc_ops);
4790}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791#else
4792#define dev_proc_init() 0
4793#endif /* CONFIG_PROC_FS */
4794
4795
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004796struct netdev_upper {
4797 struct net_device *dev;
4798 bool master;
4799 struct list_head list;
4800 struct rcu_head rcu;
4801 struct list_head search_list;
4802};
4803
4804static void __append_search_uppers(struct list_head *search_list,
4805 struct net_device *dev)
4806{
4807 struct netdev_upper *upper;
4808
4809 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4810 /* check if this upper is not already in search list */
4811 if (list_empty(&upper->search_list))
4812 list_add_tail(&upper->search_list, search_list);
4813 }
4814}
4815
4816static bool __netdev_search_upper_dev(struct net_device *dev,
4817 struct net_device *upper_dev)
4818{
4819 LIST_HEAD(search_list);
4820 struct netdev_upper *upper;
4821 struct netdev_upper *tmp;
4822 bool ret = false;
4823
4824 __append_search_uppers(&search_list, dev);
4825 list_for_each_entry(upper, &search_list, search_list) {
4826 if (upper->dev == upper_dev) {
4827 ret = true;
4828 break;
4829 }
4830 __append_search_uppers(&search_list, upper->dev);
4831 }
4832 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4833 INIT_LIST_HEAD(&upper->search_list);
4834 return ret;
4835}
4836
4837static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4838 struct net_device *upper_dev)
4839{
4840 struct netdev_upper *upper;
4841
4842 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4843 if (upper->dev == upper_dev)
4844 return upper;
4845 }
4846 return NULL;
4847}
4848
4849/**
4850 * netdev_has_upper_dev - Check if device is linked to an upper device
4851 * @dev: device
4852 * @upper_dev: upper device to check
4853 *
4854 * Find out if a device is linked to specified upper device and return true
4855 * in case it is. Note that this checks only immediate upper device,
4856 * not through a complete stack of devices. The caller must hold the RTNL lock.
4857 */
4858bool netdev_has_upper_dev(struct net_device *dev,
4859 struct net_device *upper_dev)
4860{
4861 ASSERT_RTNL();
4862
4863 return __netdev_find_upper(dev, upper_dev);
4864}
4865EXPORT_SYMBOL(netdev_has_upper_dev);
4866
4867/**
4868 * netdev_has_any_upper_dev - Check if device is linked to some device
4869 * @dev: device
4870 *
4871 * Find out if a device is linked to an upper device and return true in case
4872 * it is. The caller must hold the RTNL lock.
4873 */
4874bool netdev_has_any_upper_dev(struct net_device *dev)
4875{
4876 ASSERT_RTNL();
4877
4878 return !list_empty(&dev->upper_dev_list);
4879}
4880EXPORT_SYMBOL(netdev_has_any_upper_dev);
4881
4882/**
4883 * netdev_master_upper_dev_get - Get master upper device
4884 * @dev: device
4885 *
4886 * Find a master upper device and return pointer to it or NULL in case
4887 * it's not there. The caller must hold the RTNL lock.
4888 */
4889struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4890{
4891 struct netdev_upper *upper;
4892
4893 ASSERT_RTNL();
4894
4895 if (list_empty(&dev->upper_dev_list))
4896 return NULL;
4897
4898 upper = list_first_entry(&dev->upper_dev_list,
4899 struct netdev_upper, list);
4900 if (likely(upper->master))
4901 return upper->dev;
4902 return NULL;
4903}
4904EXPORT_SYMBOL(netdev_master_upper_dev_get);
4905
4906/**
4907 * netdev_master_upper_dev_get_rcu - Get master upper device
4908 * @dev: device
4909 *
4910 * Find a master upper device and return pointer to it or NULL in case
4911 * it's not there. The caller must hold the RCU read lock.
4912 */
4913struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4914{
4915 struct netdev_upper *upper;
4916
4917 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4918 struct netdev_upper, list);
4919 if (upper && likely(upper->master))
4920 return upper->dev;
4921 return NULL;
4922}
4923EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4924
4925static int __netdev_upper_dev_link(struct net_device *dev,
4926 struct net_device *upper_dev, bool master)
4927{
4928 struct netdev_upper *upper;
4929
4930 ASSERT_RTNL();
4931
4932 if (dev == upper_dev)
4933 return -EBUSY;
4934
4935 /* To prevent loops, check if dev is not upper device to upper_dev. */
4936 if (__netdev_search_upper_dev(upper_dev, dev))
4937 return -EBUSY;
4938
4939 if (__netdev_find_upper(dev, upper_dev))
4940 return -EEXIST;
4941
4942 if (master && netdev_master_upper_dev_get(dev))
4943 return -EBUSY;
4944
4945 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4946 if (!upper)
4947 return -ENOMEM;
4948
4949 upper->dev = upper_dev;
4950 upper->master = master;
4951 INIT_LIST_HEAD(&upper->search_list);
4952
4953 /* Ensure that master upper link is always the first item in list. */
4954 if (master)
4955 list_add_rcu(&upper->list, &dev->upper_dev_list);
4956 else
4957 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4958 dev_hold(upper_dev);
4959
4960 return 0;
4961}
4962
4963/**
4964 * netdev_upper_dev_link - Add a link to the upper device
4965 * @dev: device
4966 * @upper_dev: new upper device
4967 *
4968 * Adds a link to device which is upper to this one. The caller must hold
4969 * the RTNL lock. On a failure a negative errno code is returned.
4970 * On success the reference counts are adjusted and the function
4971 * returns zero.
4972 */
4973int netdev_upper_dev_link(struct net_device *dev,
4974 struct net_device *upper_dev)
4975{
4976 return __netdev_upper_dev_link(dev, upper_dev, false);
4977}
4978EXPORT_SYMBOL(netdev_upper_dev_link);
4979
4980/**
4981 * netdev_master_upper_dev_link - Add a master link to the upper device
4982 * @dev: device
4983 * @upper_dev: new upper device
4984 *
4985 * Adds a link to device which is upper to this one. In this case, only
4986 * one master upper device can be linked, although other non-master devices
4987 * might be linked as well. The caller must hold the RTNL lock.
4988 * On a failure a negative errno code is returned. On success the reference
4989 * counts are adjusted and the function returns zero.
4990 */
4991int netdev_master_upper_dev_link(struct net_device *dev,
4992 struct net_device *upper_dev)
4993{
4994 return __netdev_upper_dev_link(dev, upper_dev, true);
4995}
4996EXPORT_SYMBOL(netdev_master_upper_dev_link);
4997
4998/**
4999 * netdev_upper_dev_unlink - Removes a link to upper device
5000 * @dev: device
5001 * @upper_dev: new upper device
5002 *
5003 * Removes a link to device which is upper to this one. The caller must hold
5004 * the RTNL lock.
5005 */
5006void netdev_upper_dev_unlink(struct net_device *dev,
5007 struct net_device *upper_dev)
5008{
5009 struct netdev_upper *upper;
5010
5011 ASSERT_RTNL();
5012
5013 upper = __netdev_find_upper(dev, upper_dev);
5014 if (!upper)
5015 return;
5016 list_del_rcu(&upper->list);
5017 dev_put(upper_dev);
5018 kfree_rcu(upper, rcu);
5019}
5020EXPORT_SYMBOL(netdev_upper_dev_unlink);
5021
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005022static void dev_change_rx_flags(struct net_device *dev, int flags)
5023{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005024 const struct net_device_ops *ops = dev->netdev_ops;
5025
5026 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
5027 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005028}
5029
Wang Chendad9b332008-06-18 01:48:28 -07005030static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07005031{
Eric Dumazetb536db92011-11-30 21:42:26 +00005032 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005033 kuid_t uid;
5034 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005035
Patrick McHardy24023452007-07-14 18:51:31 -07005036 ASSERT_RTNL();
5037
Wang Chendad9b332008-06-18 01:48:28 -07005038 dev->flags |= IFF_PROMISC;
5039 dev->promiscuity += inc;
5040 if (dev->promiscuity == 0) {
5041 /*
5042 * Avoid overflow.
5043 * If inc causes overflow, untouch promisc and return error.
5044 */
5045 if (inc < 0)
5046 dev->flags &= ~IFF_PROMISC;
5047 else {
5048 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005049 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5050 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005051 return -EOVERFLOW;
5052 }
5053 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005054 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005055 pr_info("device %s %s promiscuous mode\n",
5056 dev->name,
5057 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005058 if (audit_enabled) {
5059 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005060 audit_log(current->audit_context, GFP_ATOMIC,
5061 AUDIT_ANOM_PROMISCUOUS,
5062 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5063 dev->name, (dev->flags & IFF_PROMISC),
5064 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005065 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005066 from_kuid(&init_user_ns, uid),
5067 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005068 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005069 }
Patrick McHardy24023452007-07-14 18:51:31 -07005070
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005071 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005072 }
Wang Chendad9b332008-06-18 01:48:28 -07005073 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005074}
5075
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076/**
5077 * dev_set_promiscuity - update promiscuity count on a device
5078 * @dev: device
5079 * @inc: modifier
5080 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005081 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082 * remains above zero the interface remains promiscuous. Once it hits zero
5083 * the device reverts back to normal filtering operation. A negative inc
5084 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005085 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 */
Wang Chendad9b332008-06-18 01:48:28 -07005087int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088{
Eric Dumazetb536db92011-11-30 21:42:26 +00005089 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005090 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091
Wang Chendad9b332008-06-18 01:48:28 -07005092 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005093 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005094 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005095 if (dev->flags != old_flags)
5096 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005097 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005099EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100
5101/**
5102 * dev_set_allmulti - update allmulti count on a device
5103 * @dev: device
5104 * @inc: modifier
5105 *
5106 * Add or remove reception of all multicast frames to a device. While the
5107 * count in the device remains above zero the interface remains listening
5108 * to all interfaces. Once it hits zero the device reverts back to normal
5109 * filtering operation. A negative @inc value is used to drop the counter
5110 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07005111 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112 */
5113
Wang Chendad9b332008-06-18 01:48:28 -07005114int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115{
Eric Dumazetb536db92011-11-30 21:42:26 +00005116 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117
Patrick McHardy24023452007-07-14 18:51:31 -07005118 ASSERT_RTNL();
5119
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005121 dev->allmulti += inc;
5122 if (dev->allmulti == 0) {
5123 /*
5124 * Avoid overflow.
5125 * If inc causes overflow, untouch allmulti and return error.
5126 */
5127 if (inc < 0)
5128 dev->flags &= ~IFF_ALLMULTI;
5129 else {
5130 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005131 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5132 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005133 return -EOVERFLOW;
5134 }
5135 }
Patrick McHardy24023452007-07-14 18:51:31 -07005136 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005137 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005138 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07005139 }
Wang Chendad9b332008-06-18 01:48:28 -07005140 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005141}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005142EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07005143
5144/*
5145 * Upload unicast and multicast address lists to device and
5146 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005147 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005148 * are present.
5149 */
5150void __dev_set_rx_mode(struct net_device *dev)
5151{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005152 const struct net_device_ops *ops = dev->netdev_ops;
5153
Patrick McHardy4417da62007-06-27 01:28:10 -07005154 /* dev_open will call this function so the list will stay sane. */
5155 if (!(dev->flags&IFF_UP))
5156 return;
5157
5158 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005159 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005160
Jiri Pirko01789342011-08-16 06:29:00 +00005161 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005162 /* Unicast addresses changes may only happen under the rtnl,
5163 * therefore calling __dev_set_promiscuity here is safe.
5164 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005165 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005166 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07005167 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005168 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005169 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07005170 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005171 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005172 }
Jiri Pirko01789342011-08-16 06:29:00 +00005173
5174 if (ops->ndo_set_rx_mode)
5175 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005176}
5177
5178void dev_set_rx_mode(struct net_device *dev)
5179{
David S. Millerb9e40852008-07-15 00:15:08 -07005180 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005181 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005182 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183}
5184
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005185/**
5186 * dev_get_flags - get flags reported to userspace
5187 * @dev: device
5188 *
5189 * Get the combination of flag bits exported through APIs to userspace.
5190 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005191unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192{
Eric Dumazet95c96172012-04-15 05:58:06 +00005193 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194
5195 flags = (dev->flags & ~(IFF_PROMISC |
5196 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005197 IFF_RUNNING |
5198 IFF_LOWER_UP |
5199 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200 (dev->gflags & (IFF_PROMISC |
5201 IFF_ALLMULTI));
5202
Stefan Rompfb00055a2006-03-20 17:09:11 -08005203 if (netif_running(dev)) {
5204 if (netif_oper_up(dev))
5205 flags |= IFF_RUNNING;
5206 if (netif_carrier_ok(dev))
5207 flags |= IFF_LOWER_UP;
5208 if (netif_dormant(dev))
5209 flags |= IFF_DORMANT;
5210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211
5212 return flags;
5213}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005214EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215
Patrick McHardybd380812010-02-26 06:34:53 +00005216int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217{
Eric Dumazetb536db92011-11-30 21:42:26 +00005218 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005219 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220
Patrick McHardy24023452007-07-14 18:51:31 -07005221 ASSERT_RTNL();
5222
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 /*
5224 * Set the flags on our device.
5225 */
5226
5227 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5228 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5229 IFF_AUTOMEDIA)) |
5230 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5231 IFF_ALLMULTI));
5232
5233 /*
5234 * Load in the correct multicast list now the flags have changed.
5235 */
5236
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005237 if ((old_flags ^ flags) & IFF_MULTICAST)
5238 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005239
Patrick McHardy4417da62007-06-27 01:28:10 -07005240 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241
5242 /*
5243 * Have we downed the interface. We handle IFF_UP ourselves
5244 * according to user attempts to set it, rather than blindly
5245 * setting it.
5246 */
5247
5248 ret = 0;
5249 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005250 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251
5252 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005253 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254 }
5255
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005257 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5258
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 dev->gflags ^= IFF_PROMISC;
5260 dev_set_promiscuity(dev, inc);
5261 }
5262
5263 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5264 is important. Some (broken) drivers set IFF_PROMISC, when
5265 IFF_ALLMULTI is requested not asking us and not reporting.
5266 */
5267 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005268 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5269
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270 dev->gflags ^= IFF_ALLMULTI;
5271 dev_set_allmulti(dev, inc);
5272 }
5273
Patrick McHardybd380812010-02-26 06:34:53 +00005274 return ret;
5275}
5276
5277void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
5278{
5279 unsigned int changes = dev->flags ^ old_flags;
5280
5281 if (changes & IFF_UP) {
5282 if (dev->flags & IFF_UP)
5283 call_netdevice_notifiers(NETDEV_UP, dev);
5284 else
5285 call_netdevice_notifiers(NETDEV_DOWN, dev);
5286 }
5287
5288 if (dev->flags & IFF_UP &&
5289 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
5290 call_netdevice_notifiers(NETDEV_CHANGE, dev);
5291}
5292
5293/**
5294 * dev_change_flags - change device settings
5295 * @dev: device
5296 * @flags: device state flags
5297 *
5298 * Change settings on device based state flags. The flags are
5299 * in the userspace exported format.
5300 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005301int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005302{
Eric Dumazetb536db92011-11-30 21:42:26 +00005303 int ret;
5304 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005305
5306 ret = __dev_change_flags(dev, flags);
5307 if (ret < 0)
5308 return ret;
5309
5310 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07005311 if (changes)
5312 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313
Patrick McHardybd380812010-02-26 06:34:53 +00005314 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 return ret;
5316}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005317EXPORT_SYMBOL(dev_change_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005319/**
5320 * dev_set_mtu - Change maximum transfer unit
5321 * @dev: device
5322 * @new_mtu: new transfer unit
5323 *
5324 * Change the maximum transfer size of the network device.
5325 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326int dev_set_mtu(struct net_device *dev, int new_mtu)
5327{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005328 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 int err;
5330
5331 if (new_mtu == dev->mtu)
5332 return 0;
5333
5334 /* MTU must be positive. */
5335 if (new_mtu < 0)
5336 return -EINVAL;
5337
5338 if (!netif_device_present(dev))
5339 return -ENODEV;
5340
5341 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005342 if (ops->ndo_change_mtu)
5343 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344 else
5345 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005346
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005347 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005348 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349 return err;
5350}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005351EXPORT_SYMBOL(dev_set_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005353/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005354 * dev_set_group - Change group this device belongs to
5355 * @dev: device
5356 * @new_group: group this device should belong to
5357 */
5358void dev_set_group(struct net_device *dev, int new_group)
5359{
5360 dev->group = new_group;
5361}
5362EXPORT_SYMBOL(dev_set_group);
5363
5364/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005365 * dev_set_mac_address - Change Media Access Control Address
5366 * @dev: device
5367 * @sa: new address
5368 *
5369 * Change the hardware (MAC) address of the device
5370 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005371int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5372{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005373 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374 int err;
5375
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005376 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377 return -EOPNOTSUPP;
5378 if (sa->sa_family != dev->type)
5379 return -EINVAL;
5380 if (!netif_device_present(dev))
5381 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005382 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005383 if (err)
5384 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005385 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005386 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005387 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005388 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005390EXPORT_SYMBOL(dev_set_mac_address);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005392/**
5393 * dev_change_carrier - Change device carrier
5394 * @dev: device
5395 * @new_carries: new value
5396 *
5397 * Change device carrier
5398 */
5399int dev_change_carrier(struct net_device *dev, bool new_carrier)
5400{
5401 const struct net_device_ops *ops = dev->netdev_ops;
5402
5403 if (!ops->ndo_change_carrier)
5404 return -EOPNOTSUPP;
5405 if (!netif_device_present(dev))
5406 return -ENODEV;
5407 return ops->ndo_change_carrier(dev, new_carrier);
5408}
5409EXPORT_SYMBOL(dev_change_carrier);
5410
Linus Torvalds1da177e2005-04-16 15:20:36 -07005411/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00005412 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07005414static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415{
5416 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00005417 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418
5419 if (!dev)
5420 return -ENODEV;
5421
5422 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005423 case SIOCGIFFLAGS: /* Get interface flags */
5424 ifr->ifr_flags = (short) dev_get_flags(dev);
5425 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005427 case SIOCGIFMETRIC: /* Get the metric on the interface
5428 (currently unused) */
5429 ifr->ifr_metric = 0;
5430 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005432 case SIOCGIFMTU: /* Get the MTU of a device */
5433 ifr->ifr_mtu = dev->mtu;
5434 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005436 case SIOCGIFHWADDR:
5437 if (!dev->addr_len)
5438 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5439 else
5440 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5441 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5442 ifr->ifr_hwaddr.sa_family = dev->type;
5443 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005445 case SIOCGIFSLAVE:
5446 err = -EINVAL;
5447 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005448
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005449 case SIOCGIFMAP:
5450 ifr->ifr_map.mem_start = dev->mem_start;
5451 ifr->ifr_map.mem_end = dev->mem_end;
5452 ifr->ifr_map.base_addr = dev->base_addr;
5453 ifr->ifr_map.irq = dev->irq;
5454 ifr->ifr_map.dma = dev->dma;
5455 ifr->ifr_map.port = dev->if_port;
5456 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005457
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005458 case SIOCGIFINDEX:
5459 ifr->ifr_ifindex = dev->ifindex;
5460 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005461
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005462 case SIOCGIFTXQLEN:
5463 ifr->ifr_qlen = dev->tx_queue_len;
5464 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005465
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005466 default:
5467 /* dev_ioctl() should ensure this case
5468 * is never reached
5469 */
5470 WARN_ON(1);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005471 err = -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005472 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005473
5474 }
5475 return err;
5476}
5477
5478/*
5479 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5480 */
5481static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5482{
5483 int err;
5484 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005485 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005486
5487 if (!dev)
5488 return -ENODEV;
5489
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08005490 ops = dev->netdev_ops;
5491
Jeff Garzik14e3e072007-10-08 00:06:32 -07005492 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005493 case SIOCSIFFLAGS: /* Set interface flags */
5494 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005495
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005496 case SIOCSIFMETRIC: /* Set the metric on the interface
5497 (currently unused) */
5498 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07005499
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005500 case SIOCSIFMTU: /* Set the MTU of a device */
5501 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07005502
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005503 case SIOCSIFHWADDR:
5504 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005506 case SIOCSIFHWBROADCAST:
5507 if (ifr->ifr_hwaddr.sa_family != dev->type)
5508 return -EINVAL;
5509 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5510 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5511 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5512 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005513
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005514 case SIOCSIFMAP:
5515 if (ops->ndo_set_config) {
5516 if (!netif_device_present(dev))
5517 return -ENODEV;
5518 return ops->ndo_set_config(dev, &ifr->ifr_map);
5519 }
5520 return -EOPNOTSUPP;
5521
5522 case SIOCADDMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005523 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005524 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5525 return -EINVAL;
5526 if (!netif_device_present(dev))
5527 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005528 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005529
5530 case SIOCDELMULTI:
Jiri Pirkob81693d2011-08-16 06:29:02 +00005531 if (!ops->ndo_set_rx_mode ||
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005532 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5533 return -EINVAL;
5534 if (!netif_device_present(dev))
5535 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00005536 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005537
5538 case SIOCSIFTXQLEN:
5539 if (ifr->ifr_qlen < 0)
5540 return -EINVAL;
5541 dev->tx_queue_len = ifr->ifr_qlen;
5542 return 0;
5543
5544 case SIOCSIFNAME:
5545 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5546 return dev_change_name(dev, ifr->ifr_newname);
5547
Richard Cochran4dc360c2011-10-19 17:00:35 -04005548 case SIOCSHWTSTAMP:
5549 err = net_hwtstamp_validate(ifr);
5550 if (err)
5551 return err;
5552 /* fall through */
5553
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005554 /*
5555 * Unknown or private ioctl
5556 */
5557 default:
5558 if ((cmd >= SIOCDEVPRIVATE &&
5559 cmd <= SIOCDEVPRIVATE + 15) ||
5560 cmd == SIOCBONDENSLAVE ||
5561 cmd == SIOCBONDRELEASE ||
5562 cmd == SIOCBONDSETHWADDR ||
5563 cmd == SIOCBONDSLAVEINFOQUERY ||
5564 cmd == SIOCBONDINFOQUERY ||
5565 cmd == SIOCBONDCHANGEACTIVE ||
5566 cmd == SIOCGMIIPHY ||
5567 cmd == SIOCGMIIREG ||
5568 cmd == SIOCSMIIREG ||
5569 cmd == SIOCBRADDIF ||
5570 cmd == SIOCBRDELIF ||
5571 cmd == SIOCSHWTSTAMP ||
5572 cmd == SIOCWANDEV) {
5573 err = -EOPNOTSUPP;
5574 if (ops->ndo_do_ioctl) {
5575 if (netif_device_present(dev))
5576 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5577 else
5578 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005580 } else
5581 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582
5583 }
5584 return err;
5585}
5586
5587/*
5588 * This function handles all "interface"-type I/O control requests. The actual
5589 * 'doing' part of this is dev_ifsioc above.
5590 */
5591
5592/**
5593 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005594 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005595 * @cmd: command to issue
5596 * @arg: pointer to a struct ifreq in user space
5597 *
5598 * Issue ioctl functions to devices. This is normally called by the
5599 * user space syscall interfaces but can sometimes be useful for
5600 * other purposes. The return value is the return from the syscall if
5601 * positive or a negative errno code on error.
5602 */
5603
Eric W. Biederman881d9662007-09-17 11:56:21 -07005604int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005605{
5606 struct ifreq ifr;
5607 int ret;
5608 char *colon;
5609
5610 /* One special case: SIOCGIFCONF takes ifconf argument
5611 and requires shared lock, because it sleeps writing
5612 to user space.
5613 */
5614
5615 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005616 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005617 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005618 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005619 return ret;
5620 }
5621 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005622 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005623
5624 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5625 return -EFAULT;
5626
5627 ifr.ifr_name[IFNAMSIZ-1] = 0;
5628
5629 colon = strchr(ifr.ifr_name, ':');
5630 if (colon)
5631 *colon = 0;
5632
5633 /*
5634 * See which interface the caller is talking about.
5635 */
5636
5637 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005638 /*
5639 * These ioctl calls:
5640 * - can be done by all.
5641 * - atomic and do not require locking.
5642 * - return a value
5643 */
5644 case SIOCGIFFLAGS:
5645 case SIOCGIFMETRIC:
5646 case SIOCGIFMTU:
5647 case SIOCGIFHWADDR:
5648 case SIOCGIFSLAVE:
5649 case SIOCGIFMAP:
5650 case SIOCGIFINDEX:
5651 case SIOCGIFTXQLEN:
5652 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005653 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005654 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00005655 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005656 if (!ret) {
5657 if (colon)
5658 *colon = ':';
5659 if (copy_to_user(arg, &ifr,
5660 sizeof(struct ifreq)))
5661 ret = -EFAULT;
5662 }
5663 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005665 case SIOCETHTOOL:
5666 dev_load(net, ifr.ifr_name);
5667 rtnl_lock();
5668 ret = dev_ethtool(net, &ifr);
5669 rtnl_unlock();
5670 if (!ret) {
5671 if (colon)
5672 *colon = ':';
5673 if (copy_to_user(arg, &ifr,
5674 sizeof(struct ifreq)))
5675 ret = -EFAULT;
5676 }
5677 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005679 /*
5680 * These ioctl calls:
5681 * - require superuser power.
5682 * - require strict serialization.
5683 * - return a value
5684 */
5685 case SIOCGMIIPHY:
5686 case SIOCGMIIREG:
5687 case SIOCSIFNAME:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005688 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005689 return -EPERM;
5690 dev_load(net, ifr.ifr_name);
5691 rtnl_lock();
5692 ret = dev_ifsioc(net, &ifr, cmd);
5693 rtnl_unlock();
5694 if (!ret) {
5695 if (colon)
5696 *colon = ':';
5697 if (copy_to_user(arg, &ifr,
5698 sizeof(struct ifreq)))
5699 ret = -EFAULT;
5700 }
5701 return ret;
5702
5703 /*
5704 * These ioctl calls:
5705 * - require superuser power.
5706 * - require strict serialization.
5707 * - do not return a value
5708 */
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005709 case SIOCSIFMAP:
5710 case SIOCSIFTXQLEN:
5711 if (!capable(CAP_NET_ADMIN))
5712 return -EPERM;
5713 /* fall through */
5714 /*
5715 * These ioctl calls:
5716 * - require local superuser power.
5717 * - require strict serialization.
5718 * - do not return a value
5719 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005720 case SIOCSIFFLAGS:
5721 case SIOCSIFMETRIC:
5722 case SIOCSIFMTU:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005723 case SIOCSIFHWADDR:
5724 case SIOCSIFSLAVE:
5725 case SIOCADDMULTI:
5726 case SIOCDELMULTI:
5727 case SIOCSIFHWBROADCAST:
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005728 case SIOCSMIIREG:
5729 case SIOCBONDENSLAVE:
5730 case SIOCBONDRELEASE:
5731 case SIOCBONDSETHWADDR:
5732 case SIOCBONDCHANGEACTIVE:
5733 case SIOCBRADDIF:
5734 case SIOCBRDELIF:
5735 case SIOCSHWTSTAMP:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +00005736 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005737 return -EPERM;
5738 /* fall through */
5739 case SIOCBONDSLAVEINFOQUERY:
5740 case SIOCBONDINFOQUERY:
5741 dev_load(net, ifr.ifr_name);
5742 rtnl_lock();
5743 ret = dev_ifsioc(net, &ifr, cmd);
5744 rtnl_unlock();
5745 return ret;
5746
5747 case SIOCGIFMEM:
5748 /* Get the per device memory space. We can add this but
5749 * currently do not support it */
5750 case SIOCSIFMEM:
5751 /* Set the per device memory buffer space.
5752 * Not applicable in our case */
5753 case SIOCSIFLINK:
Lifeng Sun41c31f32011-04-27 22:04:51 +00005754 return -ENOTTY;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005755
5756 /*
5757 * Unknown or private ioctl.
5758 */
5759 default:
5760 if (cmd == SIOCWANDEV ||
5761 (cmd >= SIOCDEVPRIVATE &&
5762 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005763 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005765 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005767 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005768 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005769 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005771 }
5772 /* Take care of Wireless Extensions */
5773 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5774 return wext_handle_ioctl(net, &ifr, cmd, arg);
Lifeng Sun41c31f32011-04-27 22:04:51 +00005775 return -ENOTTY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005776 }
5777}
5778
5779
5780/**
5781 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005782 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783 *
5784 * Returns a suitable unique value for a new device interface
5785 * number. The caller must hold the rtnl semaphore or the
5786 * dev_base_lock to be sure it remains unique.
5787 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005788static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005789{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005790 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791 for (;;) {
5792 if (++ifindex <= 0)
5793 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005794 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005795 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005796 }
5797}
5798
Linus Torvalds1da177e2005-04-16 15:20:36 -07005799/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005800static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005801
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005802static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005803{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005804 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005805}
5806
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005807static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005808{
Krishna Kumare93737b2009-12-08 22:26:02 +00005809 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005810
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005811 BUG_ON(dev_boot_phase);
5812 ASSERT_RTNL();
5813
Krishna Kumare93737b2009-12-08 22:26:02 +00005814 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005815 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00005816 * for initialization unwind. Remove those
5817 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005818 */
5819 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005820 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5821 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005822
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005823 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005824 list_del(&dev->unreg_list);
5825 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005826 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005827 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005828 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005829 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005830
Octavian Purdila44345722010-12-13 12:44:07 +00005831 /* If device is running, close it first. */
5832 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005833
Octavian Purdila44345722010-12-13 12:44:07 +00005834 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005835 /* And unlink it from device chain. */
5836 unlist_netdevice(dev);
5837
5838 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005839 }
5840
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005841 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005842
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005843 list_for_each_entry(dev, head, unreg_list) {
5844 /* Shutdown queueing discipline. */
5845 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005846
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005847
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005848 /* Notify protocols, that we are about to destroy
5849 this device. They should clean all the things.
5850 */
5851 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5852
Patrick McHardya2835762010-02-26 06:34:51 +00005853 if (!dev->rtnl_link_ops ||
5854 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5855 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5856
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005857 /*
5858 * Flush the unicast and multicast chains
5859 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005860 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005861 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005862
5863 if (dev->netdev_ops->ndo_uninit)
5864 dev->netdev_ops->ndo_uninit(dev);
5865
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005866 /* Notifier chain MUST detach us all upper devices. */
5867 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005868
5869 /* Remove entries from kobject tree */
5870 netdev_unregister_kobject(dev);
5871 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005872
Eric W. Biederman850a5452011-10-13 22:25:23 +00005873 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005874
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005875 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005876 dev_put(dev);
5877}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005878
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005879static void rollback_registered(struct net_device *dev)
5880{
5881 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005882
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005883 list_add(&dev->unreg_list, &single);
5884 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005885 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005886}
5887
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005888static netdev_features_t netdev_fix_features(struct net_device *dev,
5889 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005890{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005891 /* Fix illegal checksum combinations */
5892 if ((features & NETIF_F_HW_CSUM) &&
5893 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005894 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005895 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5896 }
5897
Herbert Xub63365a2008-10-23 01:11:29 -07005898 /* Fix illegal SG+CSUM combinations. */
5899 if ((features & NETIF_F_SG) &&
5900 !(features & NETIF_F_ALL_CSUM)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005901 netdev_dbg(dev,
5902 "Dropping NETIF_F_SG since no checksum feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005903 features &= ~NETIF_F_SG;
5904 }
5905
5906 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005907 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005908 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005909 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005910 }
5911
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005912 /* TSO ECN requires that TSO is present as well. */
5913 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5914 features &= ~NETIF_F_TSO_ECN;
5915
Michał Mirosław212b5732011-02-15 16:59:16 +00005916 /* Software GSO depends on SG. */
5917 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005918 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005919 features &= ~NETIF_F_GSO;
5920 }
5921
Michał Mirosławacd11302011-01-24 15:45:15 -08005922 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005923 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005924 /* maybe split UFO into V4 and V6? */
5925 if (!((features & NETIF_F_GEN_CSUM) ||
5926 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5927 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005928 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005929 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005930 features &= ~NETIF_F_UFO;
5931 }
5932
5933 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005934 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005935 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005936 features &= ~NETIF_F_UFO;
5937 }
5938 }
5939
5940 return features;
5941}
Herbert Xub63365a2008-10-23 01:11:29 -07005942
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005943int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005944{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005945 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005946 int err = 0;
5947
Michał Mirosław87267482011-04-12 09:56:38 +00005948 ASSERT_RTNL();
5949
Michał Mirosław5455c692011-02-15 16:59:17 +00005950 features = netdev_get_wanted_features(dev);
5951
5952 if (dev->netdev_ops->ndo_fix_features)
5953 features = dev->netdev_ops->ndo_fix_features(dev, features);
5954
5955 /* driver might be less strict about feature dependencies */
5956 features = netdev_fix_features(dev, features);
5957
5958 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005959 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005960
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005961 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5962 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005963
5964 if (dev->netdev_ops->ndo_set_features)
5965 err = dev->netdev_ops->ndo_set_features(dev, features);
5966
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005967 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005968 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005969 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5970 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005971 return -1;
5972 }
5973
5974 if (!err)
5975 dev->features = features;
5976
5977 return 1;
5978}
5979
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005980/**
5981 * netdev_update_features - recalculate device features
5982 * @dev: the device to check
5983 *
5984 * Recalculate dev->features set and send notifications if it
5985 * has changed. Should be called after driver or hardware dependent
5986 * conditions might have changed that influence the features.
5987 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005988void netdev_update_features(struct net_device *dev)
5989{
5990 if (__netdev_update_features(dev))
5991 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005992}
5993EXPORT_SYMBOL(netdev_update_features);
5994
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005996 * netdev_change_features - recalculate device features
5997 * @dev: the device to check
5998 *
5999 * Recalculate dev->features set and send notifications even
6000 * if they have not changed. Should be called instead of
6001 * netdev_update_features() if also dev->vlan_features might
6002 * have changed to allow the changes to be propagated to stacked
6003 * VLAN devices.
6004 */
6005void netdev_change_features(struct net_device *dev)
6006{
6007 __netdev_update_features(dev);
6008 netdev_features_change(dev);
6009}
6010EXPORT_SYMBOL(netdev_change_features);
6011
6012/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006013 * netif_stacked_transfer_operstate - transfer operstate
6014 * @rootdev: the root or lower level device to transfer state from
6015 * @dev: the device to transfer operstate to
6016 *
6017 * Transfer operational state from root to device. This is normally
6018 * called when a stacking relationship exists between the root
6019 * device and the device(a leaf device).
6020 */
6021void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6022 struct net_device *dev)
6023{
6024 if (rootdev->operstate == IF_OPER_DORMANT)
6025 netif_dormant_on(dev);
6026 else
6027 netif_dormant_off(dev);
6028
6029 if (netif_carrier_ok(rootdev)) {
6030 if (!netif_carrier_ok(dev))
6031 netif_carrier_on(dev);
6032 } else {
6033 if (netif_carrier_ok(dev))
6034 netif_carrier_off(dev);
6035 }
6036}
6037EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6038
Tom Herbertbf264142010-11-26 08:36:09 +00006039#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006040static int netif_alloc_rx_queues(struct net_device *dev)
6041{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006042 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00006043 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006044
Tom Herbertbd25fa72010-10-18 18:00:16 +00006045 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006046
Tom Herbertbd25fa72010-10-18 18:00:16 +00006047 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6048 if (!rx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006049 pr_err("netdev: Unable to allocate %u rx queues\n", count);
Tom Herbertbd25fa72010-10-18 18:00:16 +00006050 return -ENOMEM;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006051 }
Tom Herbertbd25fa72010-10-18 18:00:16 +00006052 dev->_rx = rx;
6053
Tom Herbertbd25fa72010-10-18 18:00:16 +00006054 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00006055 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006056 return 0;
6057}
Tom Herbertbf264142010-11-26 08:36:09 +00006058#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00006059
Changli Gaoaa942102010-12-04 02:31:41 +00006060static void netdev_init_one_queue(struct net_device *dev,
6061 struct netdev_queue *queue, void *_unused)
6062{
6063 /* Initialize queue lock */
6064 spin_lock_init(&queue->_xmit_lock);
6065 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6066 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00006067 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00006068 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00006069#ifdef CONFIG_BQL
6070 dql_init(&queue->dql, HZ);
6071#endif
Changli Gaoaa942102010-12-04 02:31:41 +00006072}
6073
Tom Herberte6484932010-10-18 18:04:39 +00006074static int netif_alloc_netdev_queues(struct net_device *dev)
6075{
6076 unsigned int count = dev->num_tx_queues;
6077 struct netdev_queue *tx;
6078
6079 BUG_ON(count < 1);
6080
6081 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
6082 if (!tx) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006083 pr_err("netdev: Unable to allocate %u tx queues\n", count);
Tom Herberte6484932010-10-18 18:04:39 +00006084 return -ENOMEM;
6085 }
6086 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00006087
Tom Herberte6484932010-10-18 18:04:39 +00006088 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6089 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00006090
6091 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00006092}
6093
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08006094/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006095 * register_netdevice - register a network device
6096 * @dev: device to register
6097 *
6098 * Take a completed network device structure and add it to the kernel
6099 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6100 * chain. 0 is returned on success. A negative errno code is returned
6101 * on a failure to set up the device, or if the name is a duplicate.
6102 *
6103 * Callers must hold the rtnl semaphore. You may want
6104 * register_netdev() instead of this.
6105 *
6106 * BUGS:
6107 * The locking appears insufficient to guarantee two parallel registers
6108 * will not get the same name.
6109 */
6110
6111int register_netdevice(struct net_device *dev)
6112{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006113 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006114 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115
6116 BUG_ON(dev_boot_phase);
6117 ASSERT_RTNL();
6118
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006119 might_sleep();
6120
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121 /* When net_device's are persistent, this will be fatal. */
6122 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006123 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124
David S. Millerf1f28aa2008-07-15 00:08:33 -07006125 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006126 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127
Linus Torvalds1da177e2005-04-16 15:20:36 -07006128 dev->iflink = -1;
6129
Gao feng828de4f2012-09-13 20:58:27 +00006130 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006131 if (ret < 0)
6132 goto out;
6133
Linus Torvalds1da177e2005-04-16 15:20:36 -07006134 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006135 if (dev->netdev_ops->ndo_init) {
6136 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006137 if (ret) {
6138 if (ret > 0)
6139 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006140 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006141 }
6142 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006143
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006144 ret = -EBUSY;
6145 if (!dev->ifindex)
6146 dev->ifindex = dev_new_index(net);
6147 else if (__dev_get_by_index(net, dev->ifindex))
6148 goto err_uninit;
6149
Linus Torvalds1da177e2005-04-16 15:20:36 -07006150 if (dev->iflink == -1)
6151 dev->iflink = dev->ifindex;
6152
Michał Mirosław5455c692011-02-15 16:59:17 +00006153 /* Transfer changeable features to wanted_features and enable
6154 * software offloads (GSO and GRO).
6155 */
6156 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006157 dev->features |= NETIF_F_SOFT_FEATURES;
6158 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006159
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006160 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00006161 if (!(dev->flags & IFF_LOOPBACK)) {
6162 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6163 if (dev->features & NETIF_F_ALL_CSUM) {
6164 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
6165 dev->features |= NETIF_F_NOCACHE_COPY;
6166 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006167 }
6168
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006169 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006170 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006171 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006172
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006173 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6174 ret = notifier_to_errno(ret);
6175 if (ret)
6176 goto err_uninit;
6177
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006178 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006179 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006180 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006181 dev->reg_state = NETREG_REGISTERED;
6182
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006183 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006184
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185 /*
6186 * Default initial state at registry is that the
6187 * device is present.
6188 */
6189
6190 set_bit(__LINK_STATE_PRESENT, &dev->state);
6191
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006192 linkwatch_init_dev(dev);
6193
Linus Torvalds1da177e2005-04-16 15:20:36 -07006194 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006196 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006197 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198
Jiri Pirko948b3372013-01-08 01:38:25 +00006199 /* If the device has permanent device address, driver should
6200 * set dev_addr and also addr_assign_type should be set to
6201 * NET_ADDR_PERM (default value).
6202 */
6203 if (dev->addr_assign_type == NET_ADDR_PERM)
6204 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6205
Linus Torvalds1da177e2005-04-16 15:20:36 -07006206 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006207 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006208 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006209 if (ret) {
6210 rollback_registered(dev);
6211 dev->reg_state = NETREG_UNREGISTERED;
6212 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006213 /*
6214 * Prevent userspace races by waiting until the network
6215 * device is fully setup before sending notifications.
6216 */
Patrick McHardya2835762010-02-26 06:34:51 +00006217 if (!dev->rtnl_link_ops ||
6218 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6219 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220
6221out:
6222 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006223
6224err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006225 if (dev->netdev_ops->ndo_uninit)
6226 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006227 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006228}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006229EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006230
6231/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006232 * init_dummy_netdev - init a dummy network device for NAPI
6233 * @dev: device to init
6234 *
6235 * This takes a network device structure and initialize the minimum
6236 * amount of fields so it can be used to schedule NAPI polls without
6237 * registering a full blown interface. This is to be used by drivers
6238 * that need to tie several hardware interfaces to a single NAPI
6239 * poll scheduler due to HW limitations.
6240 */
6241int init_dummy_netdev(struct net_device *dev)
6242{
6243 /* Clear everything. Note we don't initialize spinlocks
6244 * are they aren't supposed to be taken by any of the
6245 * NAPI code and this dummy netdev is supposed to be
6246 * only ever used for NAPI polls
6247 */
6248 memset(dev, 0, sizeof(struct net_device));
6249
6250 /* make sure we BUG if trying to hit standard
6251 * register/unregister code path
6252 */
6253 dev->reg_state = NETREG_DUMMY;
6254
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006255 /* NAPI wants this */
6256 INIT_LIST_HEAD(&dev->napi_list);
6257
6258 /* a dummy interface is started by default */
6259 set_bit(__LINK_STATE_PRESENT, &dev->state);
6260 set_bit(__LINK_STATE_START, &dev->state);
6261
Eric Dumazet29b44332010-10-11 10:22:12 +00006262 /* Note : We dont allocate pcpu_refcnt for dummy devices,
6263 * because users of this 'device' dont need to change
6264 * its refcount.
6265 */
6266
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006267 return 0;
6268}
6269EXPORT_SYMBOL_GPL(init_dummy_netdev);
6270
6271
6272/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006273 * register_netdev - register a network device
6274 * @dev: device to register
6275 *
6276 * Take a completed network device structure and add it to the kernel
6277 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6278 * chain. 0 is returned on success. A negative errno code is returned
6279 * on a failure to set up the device, or if the name is a duplicate.
6280 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006281 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006282 * and expands the device name if you passed a format string to
6283 * alloc_netdev.
6284 */
6285int register_netdev(struct net_device *dev)
6286{
6287 int err;
6288
6289 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006290 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006291 rtnl_unlock();
6292 return err;
6293}
6294EXPORT_SYMBOL(register_netdev);
6295
Eric Dumazet29b44332010-10-11 10:22:12 +00006296int netdev_refcnt_read(const struct net_device *dev)
6297{
6298 int i, refcnt = 0;
6299
6300 for_each_possible_cpu(i)
6301 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6302 return refcnt;
6303}
6304EXPORT_SYMBOL(netdev_refcnt_read);
6305
Ben Hutchings2c530402012-07-10 10:55:09 +00006306/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006308 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006309 *
6310 * This is called when unregistering network devices.
6311 *
6312 * Any protocol or device that holds a reference should register
6313 * for netdevice notification, and cleanup and put back the
6314 * reference if they receive an UNREGISTER event.
6315 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006316 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006317 */
6318static void netdev_wait_allrefs(struct net_device *dev)
6319{
6320 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006321 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006322
Eric Dumazete014deb2009-11-17 05:59:21 +00006323 linkwatch_forget_dev(dev);
6324
Linus Torvalds1da177e2005-04-16 15:20:36 -07006325 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006326 refcnt = netdev_refcnt_read(dev);
6327
6328 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006329 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006330 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006331
6332 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006333 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006334
Eric Dumazet748e2d92012-08-22 21:50:59 +00006335 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006336 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006337 rtnl_lock();
6338
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006339 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006340 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6341 &dev->state)) {
6342 /* We must not have linkwatch events
6343 * pending on unregister. If this
6344 * happens, we simply run the queue
6345 * unscheduled, resulting in a noop
6346 * for this device.
6347 */
6348 linkwatch_run_queue();
6349 }
6350
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006351 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352
6353 rebroadcast_time = jiffies;
6354 }
6355
6356 msleep(250);
6357
Eric Dumazet29b44332010-10-11 10:22:12 +00006358 refcnt = netdev_refcnt_read(dev);
6359
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006361 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6362 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363 warning_time = jiffies;
6364 }
6365 }
6366}
6367
6368/* The sequence is:
6369 *
6370 * rtnl_lock();
6371 * ...
6372 * register_netdevice(x1);
6373 * register_netdevice(x2);
6374 * ...
6375 * unregister_netdevice(y1);
6376 * unregister_netdevice(y2);
6377 * ...
6378 * rtnl_unlock();
6379 * free_netdev(y1);
6380 * free_netdev(y2);
6381 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006382 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006383 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006384 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006385 * without deadlocking with linkwatch via keventd.
6386 * 2) Since we run with the RTNL semaphore not held, we can sleep
6387 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006388 *
6389 * We must not return until all unregister events added during
6390 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006391 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006392void netdev_run_todo(void)
6393{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006394 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006397 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006398
6399 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006400
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006401
6402 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006403 if (!list_empty(&list))
6404 rcu_barrier();
6405
Linus Torvalds1da177e2005-04-16 15:20:36 -07006406 while (!list_empty(&list)) {
6407 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006408 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006409 list_del(&dev->todo_list);
6410
Eric Dumazet748e2d92012-08-22 21:50:59 +00006411 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006412 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006413 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006414
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006415 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006416 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006417 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006418 dump_stack();
6419 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006420 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006421
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006422 dev->reg_state = NETREG_UNREGISTERED;
6423
Changli Gao152102c2010-03-30 20:16:22 +00006424 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006425
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006426 netdev_wait_allrefs(dev);
6427
6428 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006429 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006430 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6431 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006432 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006433
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006434 if (dev->destructor)
6435 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006436
6437 /* Free network device */
6438 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006439 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440}
6441
Ben Hutchings3cfde792010-07-09 09:11:52 +00006442/* Convert net_device_stats to rtnl_link_stats64. They have the same
6443 * fields in the same order, with only the type differing.
6444 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006445void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6446 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006447{
6448#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006449 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6450 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006451#else
6452 size_t i, n = sizeof(*stats64) / sizeof(u64);
6453 const unsigned long *src = (const unsigned long *)netdev_stats;
6454 u64 *dst = (u64 *)stats64;
6455
6456 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6457 sizeof(*stats64) / sizeof(u64));
6458 for (i = 0; i < n; i++)
6459 dst[i] = src[i];
6460#endif
6461}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006462EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00006463
Eric Dumazetd83345a2009-11-16 03:36:51 +00006464/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006465 * dev_get_stats - get network device statistics
6466 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006467 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006468 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006469 * Get network statistics from device. Return @storage.
6470 * The device driver may provide its own method by setting
6471 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6472 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006473 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006474struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6475 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006476{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006477 const struct net_device_ops *ops = dev->netdev_ops;
6478
Eric Dumazet28172732010-07-07 14:58:56 -07006479 if (ops->ndo_get_stats64) {
6480 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006481 ops->ndo_get_stats64(dev, storage);
6482 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006483 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006484 } else {
6485 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006486 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006487 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006488 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006489}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006490EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07006491
Eric Dumazet24824a02010-10-02 06:11:55 +00006492struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006493{
Eric Dumazet24824a02010-10-02 06:11:55 +00006494 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006495
Eric Dumazet24824a02010-10-02 06:11:55 +00006496#ifdef CONFIG_NET_CLS_ACT
6497 if (queue)
6498 return queue;
6499 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6500 if (!queue)
6501 return NULL;
6502 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006503 queue->qdisc = &noop_qdisc;
6504 queue->qdisc_sleeping = &noop_qdisc;
6505 rcu_assign_pointer(dev->ingress_queue, queue);
6506#endif
6507 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006508}
6509
Eric Dumazet2c60db02012-09-16 09:17:26 +00006510static const struct ethtool_ops default_ethtool_ops;
6511
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006513 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006514 * @sizeof_priv: size of private data to allocate space for
6515 * @name: device name format string
6516 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006517 * @txqs: the number of TX subqueues to allocate
6518 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519 *
6520 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006521 * and performs basic initialization. Also allocates subquue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006522 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006523 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006524struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6525 void (*setup)(struct net_device *),
6526 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006528 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006529 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006530 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006531
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006532 BUG_ON(strlen(name) >= sizeof(dev->name));
6533
Tom Herbert36909ea2011-01-09 19:36:31 +00006534 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006535 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006536 return NULL;
6537 }
6538
Tom Herbert36909ea2011-01-09 19:36:31 +00006539#ifdef CONFIG_RPS
6540 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006541 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006542 return NULL;
6543 }
6544#endif
6545
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006546 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006547 if (sizeof_priv) {
6548 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006549 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006550 alloc_size += sizeof_priv;
6551 }
6552 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006553 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006554
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07006555 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556 if (!p) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006557 pr_err("alloc_netdev: Unable to allocate device\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558 return NULL;
6559 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006560
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006561 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006562 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006563
Eric Dumazet29b44332010-10-11 10:22:12 +00006564 dev->pcpu_refcnt = alloc_percpu(int);
6565 if (!dev->pcpu_refcnt)
Tom Herberte6484932010-10-18 18:04:39 +00006566 goto free_p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006567
Linus Torvalds1da177e2005-04-16 15:20:36 -07006568 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006569 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570
Jiri Pirko22bedad32010-04-01 21:22:57 +00006571 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006572 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006573
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006574 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006575
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006576 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006577 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578
Herbert Xud565b0a2008-12-15 23:38:52 -08006579 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006580 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006581 INIT_LIST_HEAD(&dev->link_watch_list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006582 INIT_LIST_HEAD(&dev->upper_dev_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006583 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006584 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006585
6586 dev->num_tx_queues = txqs;
6587 dev->real_num_tx_queues = txqs;
6588 if (netif_alloc_netdev_queues(dev))
6589 goto free_all;
6590
6591#ifdef CONFIG_RPS
6592 dev->num_rx_queues = rxqs;
6593 dev->real_num_rx_queues = rxqs;
6594 if (netif_alloc_rx_queues(dev))
6595 goto free_all;
6596#endif
6597
Linus Torvalds1da177e2005-04-16 15:20:36 -07006598 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006599 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006600 if (!dev->ethtool_ops)
6601 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006602 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006603
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006604free_all:
6605 free_netdev(dev);
6606 return NULL;
6607
Eric Dumazet29b44332010-10-11 10:22:12 +00006608free_pcpu:
6609 free_percpu(dev->pcpu_refcnt);
Tom Herberted9af2e2010-11-09 10:47:30 +00006610 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006611#ifdef CONFIG_RPS
6612 kfree(dev->_rx);
6613#endif
6614
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006615free_p:
6616 kfree(p);
6617 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006618}
Tom Herbert36909ea2011-01-09 19:36:31 +00006619EXPORT_SYMBOL(alloc_netdev_mqs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006620
6621/**
6622 * free_netdev - free network device
6623 * @dev: device
6624 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006625 * This function does the last stage of destroying an allocated device
6626 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006627 * If this is the last reference then it will be freed.
6628 */
6629void free_netdev(struct net_device *dev)
6630{
Herbert Xud565b0a2008-12-15 23:38:52 -08006631 struct napi_struct *p, *n;
6632
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006633 release_net(dev_net(dev));
6634
David S. Millere8a04642008-07-17 00:34:19 -07006635 kfree(dev->_tx);
Tom Herbertfe822242010-11-09 10:47:38 +00006636#ifdef CONFIG_RPS
6637 kfree(dev->_rx);
6638#endif
David S. Millere8a04642008-07-17 00:34:19 -07006639
Eric Dumazet33d480c2011-08-11 19:30:52 +00006640 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006641
Jiri Pirkof001fde2009-05-05 02:48:28 +00006642 /* Flush device addresses */
6643 dev_addr_flush(dev);
6644
Herbert Xud565b0a2008-12-15 23:38:52 -08006645 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6646 netif_napi_del(p);
6647
Eric Dumazet29b44332010-10-11 10:22:12 +00006648 free_percpu(dev->pcpu_refcnt);
6649 dev->pcpu_refcnt = NULL;
6650
Stephen Hemminger3041a062006-05-26 13:25:24 -07006651 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006652 if (dev->reg_state == NETREG_UNINITIALIZED) {
6653 kfree((char *)dev - dev->padded);
6654 return;
6655 }
6656
6657 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6658 dev->reg_state = NETREG_RELEASED;
6659
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006660 /* will free via device release */
6661 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006663EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006664
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006665/**
6666 * synchronize_net - Synchronize with packet receive processing
6667 *
6668 * Wait for packets currently being received to be done.
6669 * Does not block later packets from starting.
6670 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006671void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672{
6673 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006674 if (rtnl_is_locked())
6675 synchronize_rcu_expedited();
6676 else
6677 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006679EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680
6681/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006682 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006684 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006685 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006686 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006687 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006688 * If head not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689 *
6690 * Callers must hold the rtnl semaphore. You may want
6691 * unregister_netdev() instead of this.
6692 */
6693
Eric Dumazet44a08732009-10-27 07:03:04 +00006694void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006695{
Herbert Xua6620712007-12-12 19:21:56 -08006696 ASSERT_RTNL();
6697
Eric Dumazet44a08732009-10-27 07:03:04 +00006698 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006699 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006700 } else {
6701 rollback_registered(dev);
6702 /* Finish processing unregister after unlock */
6703 net_set_todo(dev);
6704 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705}
Eric Dumazet44a08732009-10-27 07:03:04 +00006706EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006707
6708/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006709 * unregister_netdevice_many - unregister many devices
6710 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006711 */
6712void unregister_netdevice_many(struct list_head *head)
6713{
6714 struct net_device *dev;
6715
6716 if (!list_empty(head)) {
6717 rollback_registered_many(head);
6718 list_for_each_entry(dev, head, unreg_list)
6719 net_set_todo(dev);
6720 }
6721}
Eric Dumazet63c80992009-10-27 07:06:49 +00006722EXPORT_SYMBOL(unregister_netdevice_many);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006723
6724/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725 * unregister_netdev - remove device from the kernel
6726 * @dev: device
6727 *
6728 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006729 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006730 *
6731 * This is just a wrapper for unregister_netdevice that takes
6732 * the rtnl semaphore. In general you want to use this and not
6733 * unregister_netdevice.
6734 */
6735void unregister_netdev(struct net_device *dev)
6736{
6737 rtnl_lock();
6738 unregister_netdevice(dev);
6739 rtnl_unlock();
6740}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006741EXPORT_SYMBOL(unregister_netdev);
6742
Eric W. Biedermance286d32007-09-12 13:53:49 +02006743/**
6744 * dev_change_net_namespace - move device to different nethost namespace
6745 * @dev: device
6746 * @net: network namespace
6747 * @pat: If not NULL name pattern to try if the current device name
6748 * is already taken in the destination network namespace.
6749 *
6750 * This function shuts down a device interface and moves it
6751 * to a new network namespace. On success 0 is returned, on
6752 * a failure a netagive errno code is returned.
6753 *
6754 * Callers must hold the rtnl semaphore.
6755 */
6756
6757int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6758{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006759 int err;
6760
6761 ASSERT_RTNL();
6762
6763 /* Don't allow namespace local devices to be moved. */
6764 err = -EINVAL;
6765 if (dev->features & NETIF_F_NETNS_LOCAL)
6766 goto out;
6767
6768 /* Ensure the device has been registrered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006769 if (dev->reg_state != NETREG_REGISTERED)
6770 goto out;
6771
6772 /* Get out if there is nothing todo */
6773 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006774 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006775 goto out;
6776
6777 /* Pick the destination device name, and ensure
6778 * we can use it in the destination network namespace.
6779 */
6780 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006781 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006782 /* We get here if we can't use the current device name */
6783 if (!pat)
6784 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006785 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006786 goto out;
6787 }
6788
6789 /*
6790 * And now a mini version of register_netdevice unregister_netdevice.
6791 */
6792
6793 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006794 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006795
6796 /* And unlink it from device chain */
6797 err = -ENODEV;
6798 unlist_netdevice(dev);
6799
6800 synchronize_net();
6801
6802 /* Shutdown queueing discipline. */
6803 dev_shutdown(dev);
6804
6805 /* Notify protocols, that we are about to destroy
6806 this device. They should clean all the things.
David Lamparter3b27e102010-09-17 03:22:19 +00006807
6808 Note that dev->reg_state stays at NETREG_REGISTERED.
6809 This is wanted because this way 8021q and macvlan know
6810 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006811 */
6812 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006813 rcu_barrier();
6814 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric W. Biedermand2237d32011-10-21 06:24:20 +00006815 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006816
6817 /*
6818 * Flush the unicast and multicast chains
6819 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006820 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006821 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006822
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006823 /* Send a netdev-removed uevent to the old namespace */
6824 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6825
Eric W. Biedermance286d32007-09-12 13:53:49 +02006826 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006827 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006828
Eric W. Biedermance286d32007-09-12 13:53:49 +02006829 /* If there is an ifindex conflict assign a new one */
6830 if (__dev_get_by_index(net, dev->ifindex)) {
6831 int iflink = (dev->iflink == dev->ifindex);
6832 dev->ifindex = dev_new_index(net);
6833 if (iflink)
6834 dev->iflink = dev->ifindex;
6835 }
6836
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006837 /* Send a netdev-add uevent to the new namespace */
6838 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6839
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006840 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006841 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006842 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006843
6844 /* Add the device back in the hashes */
6845 list_netdevice(dev);
6846
6847 /* Notify protocols, that a new device appeared. */
6848 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6849
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006850 /*
6851 * Prevent userspace races by waiting until the network
6852 * device is fully setup before sending notifications.
6853 */
6854 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6855
Eric W. Biedermance286d32007-09-12 13:53:49 +02006856 synchronize_net();
6857 err = 0;
6858out:
6859 return err;
6860}
Johannes Berg463d0182009-07-14 00:33:35 +02006861EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006862
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863static int dev_cpu_callback(struct notifier_block *nfb,
6864 unsigned long action,
6865 void *ocpu)
6866{
6867 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006868 struct sk_buff *skb;
6869 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6870 struct softnet_data *sd, *oldsd;
6871
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006872 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006873 return NOTIFY_OK;
6874
6875 local_irq_disable();
6876 cpu = smp_processor_id();
6877 sd = &per_cpu(softnet_data, cpu);
6878 oldsd = &per_cpu(softnet_data, oldcpu);
6879
6880 /* Find end of our completion_queue. */
6881 list_skb = &sd->completion_queue;
6882 while (*list_skb)
6883 list_skb = &(*list_skb)->next;
6884 /* Append completion queue from offline CPU. */
6885 *list_skb = oldsd->completion_queue;
6886 oldsd->completion_queue = NULL;
6887
Linus Torvalds1da177e2005-04-16 15:20:36 -07006888 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006889 if (oldsd->output_queue) {
6890 *sd->output_queue_tailp = oldsd->output_queue;
6891 sd->output_queue_tailp = oldsd->output_queue_tailp;
6892 oldsd->output_queue = NULL;
6893 oldsd->output_queue_tailp = &oldsd->output_queue;
6894 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006895 /* Append NAPI poll list from offline CPU. */
6896 if (!list_empty(&oldsd->poll_list)) {
6897 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6898 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006900
6901 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6902 local_irq_enable();
6903
6904 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006905 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6906 netif_rx(skb);
6907 input_queue_head_incr(oldsd);
6908 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006909 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006910 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006911 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006912 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006913
6914 return NOTIFY_OK;
6915}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916
6917
Herbert Xu7f353bf2007-08-10 15:47:58 -07006918/**
Herbert Xub63365a2008-10-23 01:11:29 -07006919 * netdev_increment_features - increment feature set by one
6920 * @all: current feature set
6921 * @one: new feature set
6922 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006923 *
6924 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006925 * @one to the master device with current feature set @all. Will not
6926 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006927 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006928netdev_features_t netdev_increment_features(netdev_features_t all,
6929 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006930{
Michał Mirosław1742f182011-04-22 06:31:16 +00006931 if (mask & NETIF_F_GEN_CSUM)
6932 mask |= NETIF_F_ALL_CSUM;
6933 mask |= NETIF_F_VLAN_CHALLENGED;
6934
6935 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6936 all &= one | ~NETIF_F_ALL_FOR_ALL;
6937
Michał Mirosław1742f182011-04-22 06:31:16 +00006938 /* If one device supports hw checksumming, set for all. */
6939 if (all & NETIF_F_GEN_CSUM)
6940 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006941
6942 return all;
6943}
Herbert Xub63365a2008-10-23 01:11:29 -07006944EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006945
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006946static struct hlist_head *netdev_create_hash(void)
6947{
6948 int i;
6949 struct hlist_head *hash;
6950
6951 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6952 if (hash != NULL)
6953 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6954 INIT_HLIST_HEAD(&hash[i]);
6955
6956 return hash;
6957}
6958
Eric W. Biederman881d9662007-09-17 11:56:21 -07006959/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006960static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006961{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006962 if (net != &init_net)
6963 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006964
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006965 net->dev_name_head = netdev_create_hash();
6966 if (net->dev_name_head == NULL)
6967 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006968
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006969 net->dev_index_head = netdev_create_hash();
6970 if (net->dev_index_head == NULL)
6971 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006972
6973 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006974
6975err_idx:
6976 kfree(net->dev_name_head);
6977err_name:
6978 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006979}
6980
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006981/**
6982 * netdev_drivername - network driver for the device
6983 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006984 *
6985 * Determine network driver for device.
6986 */
David S. Miller3019de12011-06-06 16:41:33 -07006987const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006988{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006989 const struct device_driver *driver;
6990 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006991 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006992
6993 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006994 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006995 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006996
6997 driver = parent->driver;
6998 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006999 return driver->name;
7000 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07007001}
7002
Joe Perchesb004ff42012-09-12 20:12:19 -07007003static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00007004 struct va_format *vaf)
7005{
7006 int r;
7007
Joe Perchesb004ff42012-09-12 20:12:19 -07007008 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07007009 r = dev_printk_emit(level[1] - '0',
7010 dev->dev.parent,
7011 "%s %s %s: %pV",
7012 dev_driver_string(dev->dev.parent),
7013 dev_name(dev->dev.parent),
7014 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007015 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00007016 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007017 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00007018 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007019 }
Joe Perches256df2f2010-06-27 01:02:35 +00007020
7021 return r;
7022}
7023
7024int netdev_printk(const char *level, const struct net_device *dev,
7025 const char *format, ...)
7026{
7027 struct va_format vaf;
7028 va_list args;
7029 int r;
7030
7031 va_start(args, format);
7032
7033 vaf.fmt = format;
7034 vaf.va = &args;
7035
7036 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07007037
Joe Perches256df2f2010-06-27 01:02:35 +00007038 va_end(args);
7039
7040 return r;
7041}
7042EXPORT_SYMBOL(netdev_printk);
7043
7044#define define_netdev_printk_level(func, level) \
7045int func(const struct net_device *dev, const char *fmt, ...) \
7046{ \
7047 int r; \
7048 struct va_format vaf; \
7049 va_list args; \
7050 \
7051 va_start(args, fmt); \
7052 \
7053 vaf.fmt = fmt; \
7054 vaf.va = &args; \
7055 \
7056 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007057 \
Joe Perches256df2f2010-06-27 01:02:35 +00007058 va_end(args); \
7059 \
7060 return r; \
7061} \
7062EXPORT_SYMBOL(func);
7063
7064define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7065define_netdev_printk_level(netdev_alert, KERN_ALERT);
7066define_netdev_printk_level(netdev_crit, KERN_CRIT);
7067define_netdev_printk_level(netdev_err, KERN_ERR);
7068define_netdev_printk_level(netdev_warn, KERN_WARNING);
7069define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7070define_netdev_printk_level(netdev_info, KERN_INFO);
7071
Pavel Emelyanov46650792007-10-08 20:38:39 -07007072static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007073{
7074 kfree(net->dev_name_head);
7075 kfree(net->dev_index_head);
7076}
7077
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007078static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007079 .init = netdev_init,
7080 .exit = netdev_exit,
7081};
7082
Pavel Emelyanov46650792007-10-08 20:38:39 -07007083static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007084{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007085 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007086 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007087 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007088 * initial network namespace
7089 */
7090 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007091 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007092 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007093 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007094
7095 /* Ignore unmoveable devices (i.e. loopback) */
7096 if (dev->features & NETIF_F_NETNS_LOCAL)
7097 continue;
7098
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007099 /* Leave virtual devices for the generic cleanup */
7100 if (dev->rtnl_link_ops)
7101 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007102
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007103 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007104 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7105 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007106 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007107 pr_emerg("%s: failed to move %s to init_net: %d\n",
7108 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007109 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007110 }
7111 }
7112 rtnl_unlock();
7113}
7114
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007115static void __net_exit default_device_exit_batch(struct list_head *net_list)
7116{
7117 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007118 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007119 * Do this across as many network namespaces as possible to
7120 * improve batching efficiency.
7121 */
7122 struct net_device *dev;
7123 struct net *net;
7124 LIST_HEAD(dev_kill_list);
7125
7126 rtnl_lock();
7127 list_for_each_entry(net, net_list, exit_list) {
7128 for_each_netdev_reverse(net, dev) {
7129 if (dev->rtnl_link_ops)
7130 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7131 else
7132 unregister_netdevice_queue(dev, &dev_kill_list);
7133 }
7134 }
7135 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00007136 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007137 rtnl_unlock();
7138}
7139
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007140static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007141 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007142 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007143};
7144
Linus Torvalds1da177e2005-04-16 15:20:36 -07007145/*
7146 * Initialize the DEV module. At boot time this walks the device list and
7147 * unhooks any devices that fail to initialise (normally hardware not
7148 * present) and leaves us with a valid list of present and active devices.
7149 *
7150 */
7151
7152/*
7153 * This is called single threaded during boot, so no need
7154 * to take the rtnl semaphore.
7155 */
7156static int __init net_dev_init(void)
7157{
7158 int i, rc = -ENOMEM;
7159
7160 BUG_ON(!dev_boot_phase);
7161
Linus Torvalds1da177e2005-04-16 15:20:36 -07007162 if (dev_proc_init())
7163 goto out;
7164
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007165 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007166 goto out;
7167
7168 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007169 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 INIT_LIST_HEAD(&ptype_base[i]);
7171
Vlad Yasevich62532da2012-11-15 08:49:10 +00007172 INIT_LIST_HEAD(&offload_base);
7173
Eric W. Biederman881d9662007-09-17 11:56:21 -07007174 if (register_pernet_subsys(&netdev_net_ops))
7175 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176
7177 /*
7178 * Initialise the packet receive queues.
7179 */
7180
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007181 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007182 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007183
Changli Gaodee42872010-05-02 05:42:16 +00007184 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007185 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007186 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007187 sd->completion_queue = NULL;
7188 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007189 sd->output_queue = NULL;
7190 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007191#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007192 sd->csd.func = rps_trigger_softirq;
7193 sd->csd.info = sd;
7194 sd->csd.flags = 0;
7195 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007196#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007197
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007198 sd->backlog.poll = process_backlog;
7199 sd->backlog.weight = weight_p;
7200 sd->backlog.gro_list = NULL;
7201 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202 }
7203
Linus Torvalds1da177e2005-04-16 15:20:36 -07007204 dev_boot_phase = 0;
7205
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007206 /* The loopback device is special if any other network devices
7207 * is present in a network namespace the loopback device must
7208 * be present. Since we now dynamically allocate and free the
7209 * loopback device ensure this invariant is maintained by
7210 * keeping the loopback device as the first device on the
7211 * list of network devices. Ensuring the loopback devices
7212 * is the first device that appears and the last network device
7213 * that disappears.
7214 */
7215 if (register_pernet_device(&loopback_net_ops))
7216 goto out;
7217
7218 if (register_pernet_device(&default_device_ops))
7219 goto out;
7220
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007221 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7222 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007223
7224 hotcpu_notifier(dev_cpu_callback, 0);
7225 dst_init();
7226 dev_mcast_init();
7227 rc = 0;
7228out:
7229 return rc;
7230}
7231
7232subsys_initcall(net_dev_init);
7233
Krishna Kumare88721f2009-02-18 17:55:02 -08007234static int __init initialize_hashrnd(void)
7235{
Tom Herbert0a9627f2010-03-16 08:03:29 +00007236 get_random_bytes(&hashrnd, sizeof(hashrnd));
Krishna Kumare88721f2009-02-18 17:55:02 -08007237 return 0;
7238}
7239
7240late_initcall_sync(initialize_hashrnd);
7241