| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * 	NET3	Protocol independent device support routines. | 
 | 3 |  * | 
 | 4 |  *		This program is free software; you can redistribute it and/or | 
 | 5 |  *		modify it under the terms of the GNU General Public License | 
 | 6 |  *		as published by the Free Software Foundation; either version | 
 | 7 |  *		2 of the License, or (at your option) any later version. | 
 | 8 |  * | 
 | 9 |  *	Derived from the non IP parts of dev.c 1.0.19 | 
| Jesper Juhl | 02c30a8 | 2005-05-05 16:16:16 -0700 | [diff] [blame] | 10 |  * 		Authors:	Ross Biro | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 |  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 
 | 12 |  *				Mark Evans, <evansmp@uhura.aston.ac.uk> | 
 | 13 |  * | 
 | 14 |  *	Additional Authors: | 
 | 15 |  *		Florian la Roche <rzsfl@rz.uni-sb.de> | 
 | 16 |  *		Alan Cox <gw4pts@gw4pts.ampr.org> | 
 | 17 |  *		David Hinds <dahinds@users.sourceforge.net> | 
 | 18 |  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> | 
 | 19 |  *		Adam Sulmicki <adam@cfar.umd.edu> | 
 | 20 |  *              Pekka Riikonen <priikone@poesidon.pspt.fi> | 
 | 21 |  * | 
 | 22 |  *	Changes: | 
 | 23 |  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set | 
 | 24 |  *              			to 2 if register_netdev gets called | 
 | 25 |  *              			before net_dev_init & also removed a | 
 | 26 |  *              			few lines of code in the process. | 
 | 27 |  *		Alan Cox	:	device private ioctl copies fields back. | 
 | 28 |  *		Alan Cox	:	Transmit queue code does relevant | 
 | 29 |  *					stunts to keep the queue safe. | 
 | 30 |  *		Alan Cox	:	Fixed double lock. | 
 | 31 |  *		Alan Cox	:	Fixed promisc NULL pointer trap | 
 | 32 |  *		????????	:	Support the full private ioctl range | 
 | 33 |  *		Alan Cox	:	Moved ioctl permission check into | 
 | 34 |  *					drivers | 
 | 35 |  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI | 
 | 36 |  *		Alan Cox	:	100 backlog just doesn't cut it when | 
 | 37 |  *					you start doing multicast video 8) | 
 | 38 |  *		Alan Cox	:	Rewrote net_bh and list manager. | 
 | 39 |  *		Alan Cox	: 	Fix ETH_P_ALL echoback lengths. | 
 | 40 |  *		Alan Cox	:	Took out transmit every packet pass | 
 | 41 |  *					Saved a few bytes in the ioctl handler | 
 | 42 |  *		Alan Cox	:	Network driver sets packet type before | 
 | 43 |  *					calling netif_rx. Saves a function | 
 | 44 |  *					call a packet. | 
 | 45 |  *		Alan Cox	:	Hashed net_bh() | 
 | 46 |  *		Richard Kooijman:	Timestamp fixes. | 
 | 47 |  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR | 
 | 48 |  *		Alan Cox	:	Device lock protection. | 
 | 49 |  *		Alan Cox	: 	Fixed nasty side effect of device close | 
 | 50 |  *					changes. | 
 | 51 |  *		Rudi Cilibrasi	:	Pass the right thing to | 
 | 52 |  *					set_mac_address() | 
 | 53 |  *		Dave Miller	:	32bit quantity for the device lock to | 
 | 54 |  *					make it work out on a Sparc. | 
 | 55 |  *		Bjorn Ekwall	:	Added KERNELD hack. | 
 | 56 |  *		Alan Cox	:	Cleaned up the backlog initialise. | 
 | 57 |  *		Craig Metz	:	SIOCGIFCONF fix if space for under | 
 | 58 |  *					1 device. | 
 | 59 |  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there | 
 | 60 |  *					is no device open function. | 
 | 61 |  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF | 
 | 62 |  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF | 
 | 63 |  *		Cyrus Durgin	:	Cleaned for KMOD | 
 | 64 |  *		Adam Sulmicki   :	Bug Fix : Network Device Unload | 
 | 65 |  *					A network device unload needs to purge | 
 | 66 |  *					the backlog queue. | 
 | 67 |  *	Paul Rusty Russell	:	SIOCSIFNAME | 
 | 68 |  *              Pekka Riikonen  :	Netdev boot-time settings code | 
 | 69 |  *              Andrew Morton   :       Make unregister_netdevice wait | 
 | 70 |  *              			indefinitely on dev->refcnt | 
 | 71 |  * 		J Hadi Salim	:	- Backlog queue sampling | 
 | 72 |  *				        - netif_rx() feedback | 
 | 73 |  */ | 
 | 74 |  | 
 | 75 | #include <asm/uaccess.h> | 
 | 76 | #include <asm/system.h> | 
 | 77 | #include <linux/bitops.h> | 
| Randy Dunlap | 4fc268d | 2006-01-11 12:17:47 -0800 | [diff] [blame] | 78 | #include <linux/capability.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 79 | #include <linux/cpu.h> | 
 | 80 | #include <linux/types.h> | 
 | 81 | #include <linux/kernel.h> | 
| stephen hemminger | 08e9897 | 2009-11-10 07:20:34 +0000 | [diff] [blame] | 82 | #include <linux/hash.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 83 | #include <linux/slab.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 84 | #include <linux/sched.h> | 
| Arjan van de Ven | 4a3e2f7 | 2006-03-20 22:33:17 -0800 | [diff] [blame] | 85 | #include <linux/mutex.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 86 | #include <linux/string.h> | 
 | 87 | #include <linux/mm.h> | 
 | 88 | #include <linux/socket.h> | 
 | 89 | #include <linux/sockios.h> | 
 | 90 | #include <linux/errno.h> | 
 | 91 | #include <linux/interrupt.h> | 
 | 92 | #include <linux/if_ether.h> | 
 | 93 | #include <linux/netdevice.h> | 
 | 94 | #include <linux/etherdevice.h> | 
| Ben Hutchings | 0187bdf | 2008-06-19 16:15:47 -0700 | [diff] [blame] | 95 | #include <linux/ethtool.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 96 | #include <linux/notifier.h> | 
 | 97 | #include <linux/skbuff.h> | 
| Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 98 | #include <net/net_namespace.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 99 | #include <net/sock.h> | 
 | 100 | #include <linux/rtnetlink.h> | 
 | 101 | #include <linux/proc_fs.h> | 
 | 102 | #include <linux/seq_file.h> | 
 | 103 | #include <linux/stat.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 104 | #include <net/dst.h> | 
 | 105 | #include <net/pkt_sched.h> | 
 | 106 | #include <net/checksum.h> | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 107 | #include <net/xfrm.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 108 | #include <linux/highmem.h> | 
 | 109 | #include <linux/init.h> | 
 | 110 | #include <linux/kmod.h> | 
 | 111 | #include <linux/module.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 112 | #include <linux/netpoll.h> | 
 | 113 | #include <linux/rcupdate.h> | 
 | 114 | #include <linux/delay.h> | 
| Johannes Berg | 295f4a1 | 2007-04-26 20:43:56 -0700 | [diff] [blame] | 115 | #include <net/wext.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 116 | #include <net/iw_handler.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 117 | #include <asm/current.h> | 
| Steve Grubb | 5bdb988 | 2005-12-03 08:39:35 -0500 | [diff] [blame] | 118 | #include <linux/audit.h> | 
| Chris Leech | db21733 | 2006-06-17 21:24:58 -0700 | [diff] [blame] | 119 | #include <linux/dmaengine.h> | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 120 | #include <linux/err.h> | 
| David S. Miller | c7fa9d1 | 2006-08-15 16:34:13 -0700 | [diff] [blame] | 121 | #include <linux/ctype.h> | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 122 | #include <linux/if_arp.h> | 
| Ben Hutchings | 6de329e | 2008-06-16 17:02:28 -0700 | [diff] [blame] | 123 | #include <linux/if_vlan.h> | 
| David S. Miller | 8f0f222 | 2008-07-15 03:47:03 -0700 | [diff] [blame] | 124 | #include <linux/ip.h> | 
| Alexander Duyck | ad55dca | 2008-09-20 22:05:50 -0700 | [diff] [blame] | 125 | #include <net/ip.h> | 
| David S. Miller | 8f0f222 | 2008-07-15 03:47:03 -0700 | [diff] [blame] | 126 | #include <linux/ipv6.h> | 
 | 127 | #include <linux/in.h> | 
| David S. Miller | b6b2fed | 2008-07-21 09:48:06 -0700 | [diff] [blame] | 128 | #include <linux/jhash.h> | 
 | 129 | #include <linux/random.h> | 
| David S. Miller | 9cbc1cb | 2009-06-15 03:02:23 -0700 | [diff] [blame] | 130 | #include <trace/events/napi.h> | 
| Koki Sanagi | cf66ba5 | 2010-08-23 18:45:02 +0900 | [diff] [blame] | 131 | #include <trace/events/net.h> | 
| Koki Sanagi | 07dc22e | 2010-08-23 18:46:12 +0900 | [diff] [blame] | 132 | #include <trace/events/skb.h> | 
| FUJITA Tomonori | 5acbbd4 | 2010-03-30 22:35:50 +0000 | [diff] [blame] | 133 | #include <linux/pci.h> | 
| Stephen Rothwell | caeda9b | 2010-09-16 21:39:16 -0700 | [diff] [blame] | 134 | #include <linux/inetdevice.h> | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 135 | #include <linux/cpu_rmap.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 136 |  | 
| Pavel Emelyanov | 342709e | 2007-10-23 21:14:45 -0700 | [diff] [blame] | 137 | #include "net-sysfs.h" | 
 | 138 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 139 | /* Instead of increasing this, you should create a hash table. */ | 
 | 140 | #define MAX_GRO_SKBS 8 | 
 | 141 |  | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 142 | /* This should be increased if a protocol with a bigger head is added. */ | 
 | 143 | #define GRO_MAX_HEAD (MAX_HEADER + 128) | 
 | 144 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 145 | /* | 
 | 146 |  *	The list of packet types we will receive (as opposed to discard) | 
 | 147 |  *	and the routines to invoke. | 
 | 148 |  * | 
 | 149 |  *	Why 16. Because with 16 the only overlap we get on a hash of the | 
 | 150 |  *	low nibble of the protocol value is RARP/SNAP/X.25. | 
 | 151 |  * | 
 | 152 |  *      NOTE:  That is no longer true with the addition of VLAN tags.  Not | 
 | 153 |  *             sure which should go first, but I bet it won't make much | 
 | 154 |  *             difference if we are running VLANs.  The good news is that | 
 | 155 |  *             this protocol won't be in the list unless compiled in, so | 
| Stephen Hemminger | 3041a06 | 2006-05-26 13:25:24 -0700 | [diff] [blame] | 156 |  *             the average user (w/out VLANs) will not be adversely affected. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 157 |  *             --BLG | 
 | 158 |  * | 
 | 159 |  *		0800	IP | 
 | 160 |  *		8100    802.1Q VLAN | 
 | 161 |  *		0001	802.3 | 
 | 162 |  *		0002	AX.25 | 
 | 163 |  *		0004	802.2 | 
 | 164 |  *		8035	RARP | 
 | 165 |  *		0005	SNAP | 
 | 166 |  *		0805	X.25 | 
 | 167 |  *		0806	ARP | 
 | 168 |  *		8137	IPX | 
 | 169 |  *		0009	Localtalk | 
 | 170 |  *		86DD	IPv6 | 
 | 171 |  */ | 
 | 172 |  | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 173 | #define PTYPE_HASH_SIZE	(16) | 
 | 174 | #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1) | 
 | 175 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 | static DEFINE_SPINLOCK(ptype_lock); | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 177 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; | 
| Stephen Hemminger | 6b2bedc | 2007-03-12 14:33:50 -0700 | [diff] [blame] | 178 | static struct list_head ptype_all __read_mostly;	/* Taps */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 179 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 180 | /* | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 181 |  * The @dev_base_head list is protected by @dev_base_lock and the rtnl | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 182 |  * semaphore. | 
 | 183 |  * | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 184 |  * Pure readers hold dev_base_lock for reading, or rcu_read_lock() | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 |  * | 
 | 186 |  * Writers must hold the rtnl semaphore while they loop through the | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 187 |  * dev_base_head list, and hold dev_base_lock for writing when they do the | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 188 |  * actual updates.  This allows pure readers to access the list even | 
 | 189 |  * while a writer is preparing to update it. | 
 | 190 |  * | 
 | 191 |  * To put it another way, dev_base_lock is held for writing only to | 
 | 192 |  * protect against pure readers; the rtnl semaphore provides the | 
 | 193 |  * protection against other writers. | 
 | 194 |  * | 
 | 195 |  * See, for example usages, register_netdevice() and | 
 | 196 |  * unregister_netdevice(), which must be called with the rtnl | 
 | 197 |  * semaphore held. | 
 | 198 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 199 | DEFINE_RWLOCK(dev_base_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 200 | EXPORT_SYMBOL(dev_base_lock); | 
 | 201 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 202 | static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 203 | { | 
 | 204 | 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); | 
| stephen hemminger | 08e9897 | 2009-11-10 07:20:34 +0000 | [diff] [blame] | 205 | 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 206 | } | 
 | 207 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 208 | static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 209 | { | 
| Eric Dumazet | 7c28bd0 | 2009-10-24 06:13:17 -0700 | [diff] [blame] | 210 | 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 211 | } | 
 | 212 |  | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 213 | static inline void rps_lock(struct softnet_data *sd) | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 214 | { | 
 | 215 | #ifdef CONFIG_RPS | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 216 | 	spin_lock(&sd->input_pkt_queue.lock); | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 217 | #endif | 
 | 218 | } | 
 | 219 |  | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 220 | static inline void rps_unlock(struct softnet_data *sd) | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 221 | { | 
 | 222 | #ifdef CONFIG_RPS | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 223 | 	spin_unlock(&sd->input_pkt_queue.lock); | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 224 | #endif | 
 | 225 | } | 
 | 226 |  | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 227 | /* Device list insertion */ | 
 | 228 | static int list_netdevice(struct net_device *dev) | 
 | 229 | { | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 230 | 	struct net *net = dev_net(dev); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 231 |  | 
 | 232 | 	ASSERT_RTNL(); | 
 | 233 |  | 
 | 234 | 	write_lock_bh(&dev_base_lock); | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 235 | 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 236 | 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 237 | 	hlist_add_head_rcu(&dev->index_hlist, | 
 | 238 | 			   dev_index_hash(net, dev->ifindex)); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 239 | 	write_unlock_bh(&dev_base_lock); | 
 | 240 | 	return 0; | 
 | 241 | } | 
 | 242 |  | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 243 | /* Device list removal | 
 | 244 |  * caller must respect a RCU grace period before freeing/reusing dev | 
 | 245 |  */ | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 246 | static void unlist_netdevice(struct net_device *dev) | 
 | 247 | { | 
 | 248 | 	ASSERT_RTNL(); | 
 | 249 |  | 
 | 250 | 	/* Unlink dev from the device chain */ | 
 | 251 | 	write_lock_bh(&dev_base_lock); | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 252 | 	list_del_rcu(&dev->dev_list); | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 253 | 	hlist_del_rcu(&dev->name_hlist); | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 254 | 	hlist_del_rcu(&dev->index_hlist); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 255 | 	write_unlock_bh(&dev_base_lock); | 
 | 256 | } | 
 | 257 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 258 | /* | 
 | 259 |  *	Our notifier list | 
 | 260 |  */ | 
 | 261 |  | 
| Alan Stern | f07d5b9 | 2006-05-09 15:23:03 -0700 | [diff] [blame] | 262 | static RAW_NOTIFIER_HEAD(netdev_chain); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 263 |  | 
 | 264 | /* | 
 | 265 |  *	Device drivers call our routines to queue packets here. We empty the | 
 | 266 |  *	queue in the local softnet handler. | 
 | 267 |  */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 268 |  | 
| Eric Dumazet | 9958da0 | 2010-04-17 04:17:02 +0000 | [diff] [blame] | 269 | DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 270 | EXPORT_PER_CPU_SYMBOL(softnet_data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 271 |  | 
| David S. Miller | cf508b1 | 2008-07-22 14:16:42 -0700 | [diff] [blame] | 272 | #ifdef CONFIG_LOCKDEP | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 273 | /* | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 274 |  * register_netdevice() inits txq->_xmit_lock and sets lockdep class | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 275 |  * according to dev->type | 
 | 276 |  */ | 
 | 277 | static const unsigned short netdev_lock_type[] = | 
 | 278 | 	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, | 
 | 279 | 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, | 
 | 280 | 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, | 
 | 281 | 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, | 
 | 282 | 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, | 
 | 283 | 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, | 
 | 284 | 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, | 
 | 285 | 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, | 
 | 286 | 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, | 
 | 287 | 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, | 
 | 288 | 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, | 
 | 289 | 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, | 
 | 290 | 	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, | 
| Rémi Denis-Courmont | 2d91d78 | 2008-12-17 15:47:29 -0800 | [diff] [blame] | 291 | 	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, | 
| Dmitry Eremin-Solenikov | 929122cd | 2009-08-14 20:00:20 +0400 | [diff] [blame] | 292 | 	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, | 
| Sergey Lapin | fcb94e4 | 2009-06-08 12:18:47 +0000 | [diff] [blame] | 293 | 	 ARPHRD_VOID, ARPHRD_NONE}; | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 294 |  | 
| Jan Engelhardt | 36cbd3d | 2009-08-05 10:42:58 -0700 | [diff] [blame] | 295 | static const char *const netdev_lock_name[] = | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 296 | 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", | 
 | 297 | 	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", | 
 | 298 | 	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", | 
 | 299 | 	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", | 
 | 300 | 	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", | 
 | 301 | 	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", | 
 | 302 | 	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", | 
 | 303 | 	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", | 
 | 304 | 	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", | 
 | 305 | 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", | 
 | 306 | 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", | 
 | 307 | 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", | 
 | 308 | 	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", | 
| Rémi Denis-Courmont | 2d91d78 | 2008-12-17 15:47:29 -0800 | [diff] [blame] | 309 | 	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", | 
| Dmitry Eremin-Solenikov | 929122cd | 2009-08-14 20:00:20 +0400 | [diff] [blame] | 310 | 	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", | 
| Sergey Lapin | fcb94e4 | 2009-06-08 12:18:47 +0000 | [diff] [blame] | 311 | 	 "_xmit_VOID", "_xmit_NONE"}; | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 312 |  | 
 | 313 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; | 
| David S. Miller | cf508b1 | 2008-07-22 14:16:42 -0700 | [diff] [blame] | 314 | static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 315 |  | 
 | 316 | static inline unsigned short netdev_lock_pos(unsigned short dev_type) | 
 | 317 | { | 
 | 318 | 	int i; | 
 | 319 |  | 
 | 320 | 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) | 
 | 321 | 		if (netdev_lock_type[i] == dev_type) | 
 | 322 | 			return i; | 
 | 323 | 	/* the last key is used by default */ | 
 | 324 | 	return ARRAY_SIZE(netdev_lock_type) - 1; | 
 | 325 | } | 
 | 326 |  | 
| David S. Miller | cf508b1 | 2008-07-22 14:16:42 -0700 | [diff] [blame] | 327 | static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, | 
 | 328 | 						 unsigned short dev_type) | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 329 | { | 
 | 330 | 	int i; | 
 | 331 |  | 
 | 332 | 	i = netdev_lock_pos(dev_type); | 
 | 333 | 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], | 
 | 334 | 				   netdev_lock_name[i]); | 
 | 335 | } | 
| David S. Miller | cf508b1 | 2008-07-22 14:16:42 -0700 | [diff] [blame] | 336 |  | 
 | 337 | static inline void netdev_set_addr_lockdep_class(struct net_device *dev) | 
 | 338 | { | 
 | 339 | 	int i; | 
 | 340 |  | 
 | 341 | 	i = netdev_lock_pos(dev->type); | 
 | 342 | 	lockdep_set_class_and_name(&dev->addr_list_lock, | 
 | 343 | 				   &netdev_addr_lock_key[i], | 
 | 344 | 				   netdev_lock_name[i]); | 
 | 345 | } | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 346 | #else | 
| David S. Miller | cf508b1 | 2008-07-22 14:16:42 -0700 | [diff] [blame] | 347 | static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, | 
 | 348 | 						 unsigned short dev_type) | 
 | 349 | { | 
 | 350 | } | 
 | 351 | static inline void netdev_set_addr_lockdep_class(struct net_device *dev) | 
| Jarek Poplawski | 723e98b | 2007-05-15 22:46:18 -0700 | [diff] [blame] | 352 | { | 
 | 353 | } | 
 | 354 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 355 |  | 
 | 356 | /******************************************************************************* | 
 | 357 |  | 
 | 358 | 		Protocol management and registration routines | 
 | 359 |  | 
 | 360 | *******************************************************************************/ | 
 | 361 |  | 
 | 362 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 363 |  *	Add a protocol ID to the list. Now that the input handler is | 
 | 364 |  *	smarter we can dispense with all the messy stuff that used to be | 
 | 365 |  *	here. | 
 | 366 |  * | 
 | 367 |  *	BEWARE!!! Protocol handlers, mangling input packets, | 
 | 368 |  *	MUST BE last in hash buckets and checking protocol handlers | 
 | 369 |  *	MUST start from promiscuous ptype_all chain in net_bh. | 
 | 370 |  *	It is true now, do not change it. | 
 | 371 |  *	Explanation follows: if protocol handler, mangling packet, will | 
 | 372 |  *	be the first on list, it is not able to sense, that packet | 
 | 373 |  *	is cloned and should be copied-on-write, so that it will | 
 | 374 |  *	change it and subsequent readers will get broken packet. | 
 | 375 |  *							--ANK (980803) | 
 | 376 |  */ | 
 | 377 |  | 
| Eric Dumazet | c07b68e | 2010-09-02 03:53:46 +0000 | [diff] [blame] | 378 | static inline struct list_head *ptype_head(const struct packet_type *pt) | 
 | 379 | { | 
 | 380 | 	if (pt->type == htons(ETH_P_ALL)) | 
 | 381 | 		return &ptype_all; | 
 | 382 | 	else | 
 | 383 | 		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; | 
 | 384 | } | 
 | 385 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 386 | /** | 
 | 387 |  *	dev_add_pack - add packet handler | 
 | 388 |  *	@pt: packet type declaration | 
 | 389 |  * | 
 | 390 |  *	Add a protocol handler to the networking stack. The passed &packet_type | 
 | 391 |  *	is linked into kernel lists and may not be freed until it has been | 
 | 392 |  *	removed from the kernel lists. | 
 | 393 |  * | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 394 |  *	This call does not sleep therefore it can not | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 395 |  *	guarantee all CPU's that are in middle of receiving packets | 
 | 396 |  *	will see the new packet type (until the next received packet). | 
 | 397 |  */ | 
 | 398 |  | 
 | 399 | void dev_add_pack(struct packet_type *pt) | 
 | 400 | { | 
| Eric Dumazet | c07b68e | 2010-09-02 03:53:46 +0000 | [diff] [blame] | 401 | 	struct list_head *head = ptype_head(pt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 402 |  | 
| Eric Dumazet | c07b68e | 2010-09-02 03:53:46 +0000 | [diff] [blame] | 403 | 	spin_lock(&ptype_lock); | 
 | 404 | 	list_add_rcu(&pt->list, head); | 
 | 405 | 	spin_unlock(&ptype_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 406 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 407 | EXPORT_SYMBOL(dev_add_pack); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 408 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 409 | /** | 
 | 410 |  *	__dev_remove_pack	 - remove packet handler | 
 | 411 |  *	@pt: packet type declaration | 
 | 412 |  * | 
 | 413 |  *	Remove a protocol handler that was previously added to the kernel | 
 | 414 |  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed | 
 | 415 |  *	from the kernel lists and can be freed or reused once this function | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 416 |  *	returns. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 417 |  * | 
 | 418 |  *      The packet type might still be in use by receivers | 
 | 419 |  *	and must not be freed until after all the CPU's have gone | 
 | 420 |  *	through a quiescent state. | 
 | 421 |  */ | 
 | 422 | void __dev_remove_pack(struct packet_type *pt) | 
 | 423 | { | 
| Eric Dumazet | c07b68e | 2010-09-02 03:53:46 +0000 | [diff] [blame] | 424 | 	struct list_head *head = ptype_head(pt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 425 | 	struct packet_type *pt1; | 
 | 426 |  | 
| Eric Dumazet | c07b68e | 2010-09-02 03:53:46 +0000 | [diff] [blame] | 427 | 	spin_lock(&ptype_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 428 |  | 
 | 429 | 	list_for_each_entry(pt1, head, list) { | 
 | 430 | 		if (pt == pt1) { | 
 | 431 | 			list_del_rcu(&pt->list); | 
 | 432 | 			goto out; | 
 | 433 | 		} | 
 | 434 | 	} | 
 | 435 |  | 
 | 436 | 	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); | 
 | 437 | out: | 
| Eric Dumazet | c07b68e | 2010-09-02 03:53:46 +0000 | [diff] [blame] | 438 | 	spin_unlock(&ptype_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 439 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 440 | EXPORT_SYMBOL(__dev_remove_pack); | 
 | 441 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 442 | /** | 
 | 443 |  *	dev_remove_pack	 - remove packet handler | 
 | 444 |  *	@pt: packet type declaration | 
 | 445 |  * | 
 | 446 |  *	Remove a protocol handler that was previously added to the kernel | 
 | 447 |  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed | 
 | 448 |  *	from the kernel lists and can be freed or reused once this function | 
 | 449 |  *	returns. | 
 | 450 |  * | 
 | 451 |  *	This call sleeps to guarantee that no CPU is looking at the packet | 
 | 452 |  *	type after return. | 
 | 453 |  */ | 
 | 454 | void dev_remove_pack(struct packet_type *pt) | 
 | 455 | { | 
 | 456 | 	__dev_remove_pack(pt); | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 457 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 458 | 	synchronize_net(); | 
 | 459 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 460 | EXPORT_SYMBOL(dev_remove_pack); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 461 |  | 
 | 462 | /****************************************************************************** | 
 | 463 |  | 
 | 464 | 		      Device Boot-time Settings Routines | 
 | 465 |  | 
 | 466 | *******************************************************************************/ | 
 | 467 |  | 
 | 468 | /* Boot time configuration table */ | 
 | 469 | static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; | 
 | 470 |  | 
 | 471 | /** | 
 | 472 |  *	netdev_boot_setup_add	- add new setup entry | 
 | 473 |  *	@name: name of the device | 
 | 474 |  *	@map: configured settings for the device | 
 | 475 |  * | 
 | 476 |  *	Adds new setup entry to the dev_boot_setup list.  The function | 
 | 477 |  *	returns 0 on error and 1 on success.  This is a generic routine to | 
 | 478 |  *	all netdevices. | 
 | 479 |  */ | 
 | 480 | static int netdev_boot_setup_add(char *name, struct ifmap *map) | 
 | 481 | { | 
 | 482 | 	struct netdev_boot_setup *s; | 
 | 483 | 	int i; | 
 | 484 |  | 
 | 485 | 	s = dev_boot_setup; | 
 | 486 | 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { | 
 | 487 | 		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { | 
 | 488 | 			memset(s[i].name, 0, sizeof(s[i].name)); | 
| Wang Chen | 93b3cff | 2008-07-01 19:57:19 -0700 | [diff] [blame] | 489 | 			strlcpy(s[i].name, name, IFNAMSIZ); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 490 | 			memcpy(&s[i].map, map, sizeof(s[i].map)); | 
 | 491 | 			break; | 
 | 492 | 		} | 
 | 493 | 	} | 
 | 494 |  | 
 | 495 | 	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; | 
 | 496 | } | 
 | 497 |  | 
 | 498 | /** | 
 | 499 |  *	netdev_boot_setup_check	- check boot time settings | 
 | 500 |  *	@dev: the netdevice | 
 | 501 |  * | 
 | 502 |  * 	Check boot time settings for the device. | 
 | 503 |  *	The found settings are set for the device to be used | 
 | 504 |  *	later in the device probing. | 
 | 505 |  *	Returns 0 if no settings found, 1 if they are. | 
 | 506 |  */ | 
 | 507 | int netdev_boot_setup_check(struct net_device *dev) | 
 | 508 | { | 
 | 509 | 	struct netdev_boot_setup *s = dev_boot_setup; | 
 | 510 | 	int i; | 
 | 511 |  | 
 | 512 | 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { | 
 | 513 | 		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && | 
| Wang Chen | 93b3cff | 2008-07-01 19:57:19 -0700 | [diff] [blame] | 514 | 		    !strcmp(dev->name, s[i].name)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 515 | 			dev->irq 	= s[i].map.irq; | 
 | 516 | 			dev->base_addr 	= s[i].map.base_addr; | 
 | 517 | 			dev->mem_start 	= s[i].map.mem_start; | 
 | 518 | 			dev->mem_end 	= s[i].map.mem_end; | 
 | 519 | 			return 1; | 
 | 520 | 		} | 
 | 521 | 	} | 
 | 522 | 	return 0; | 
 | 523 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 524 | EXPORT_SYMBOL(netdev_boot_setup_check); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 525 |  | 
 | 526 |  | 
 | 527 | /** | 
 | 528 |  *	netdev_boot_base	- get address from boot time settings | 
 | 529 |  *	@prefix: prefix for network device | 
 | 530 |  *	@unit: id for network device | 
 | 531 |  * | 
 | 532 |  * 	Check boot time settings for the base address of device. | 
 | 533 |  *	The found settings are set for the device to be used | 
 | 534 |  *	later in the device probing. | 
 | 535 |  *	Returns 0 if no settings found. | 
 | 536 |  */ | 
 | 537 | unsigned long netdev_boot_base(const char *prefix, int unit) | 
 | 538 | { | 
 | 539 | 	const struct netdev_boot_setup *s = dev_boot_setup; | 
 | 540 | 	char name[IFNAMSIZ]; | 
 | 541 | 	int i; | 
 | 542 |  | 
 | 543 | 	sprintf(name, "%s%d", prefix, unit); | 
 | 544 |  | 
 | 545 | 	/* | 
 | 546 | 	 * If device already registered then return base of 1 | 
 | 547 | 	 * to indicate not to probe for this interface | 
 | 548 | 	 */ | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 549 | 	if (__dev_get_by_name(&init_net, name)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 550 | 		return 1; | 
 | 551 |  | 
 | 552 | 	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) | 
 | 553 | 		if (!strcmp(name, s[i].name)) | 
 | 554 | 			return s[i].map.base_addr; | 
 | 555 | 	return 0; | 
 | 556 | } | 
 | 557 |  | 
 | 558 | /* | 
 | 559 |  * Saves at boot time configured settings for any netdevice. | 
 | 560 |  */ | 
 | 561 | int __init netdev_boot_setup(char *str) | 
 | 562 | { | 
 | 563 | 	int ints[5]; | 
 | 564 | 	struct ifmap map; | 
 | 565 |  | 
 | 566 | 	str = get_options(str, ARRAY_SIZE(ints), ints); | 
 | 567 | 	if (!str || !*str) | 
 | 568 | 		return 0; | 
 | 569 |  | 
 | 570 | 	/* Save settings */ | 
 | 571 | 	memset(&map, 0, sizeof(map)); | 
 | 572 | 	if (ints[0] > 0) | 
 | 573 | 		map.irq = ints[1]; | 
 | 574 | 	if (ints[0] > 1) | 
 | 575 | 		map.base_addr = ints[2]; | 
 | 576 | 	if (ints[0] > 2) | 
 | 577 | 		map.mem_start = ints[3]; | 
 | 578 | 	if (ints[0] > 3) | 
 | 579 | 		map.mem_end = ints[4]; | 
 | 580 |  | 
 | 581 | 	/* Add new entry to the list */ | 
 | 582 | 	return netdev_boot_setup_add(str, &map); | 
 | 583 | } | 
 | 584 |  | 
 | 585 | __setup("netdev=", netdev_boot_setup); | 
 | 586 |  | 
 | 587 | /******************************************************************************* | 
 | 588 |  | 
 | 589 | 			    Device Interface Subroutines | 
 | 590 |  | 
 | 591 | *******************************************************************************/ | 
 | 592 |  | 
 | 593 | /** | 
 | 594 |  *	__dev_get_by_name	- find a device by its name | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 595 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 596 |  *	@name: name to find | 
 | 597 |  * | 
 | 598 |  *	Find an interface by name. Must be called under RTNL semaphore | 
 | 599 |  *	or @dev_base_lock. If the name is found a pointer to the device | 
 | 600 |  *	is returned. If the name is not found then %NULL is returned. The | 
 | 601 |  *	reference counters are not incremented so the caller must be | 
 | 602 |  *	careful with locks. | 
 | 603 |  */ | 
 | 604 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 605 | struct net_device *__dev_get_by_name(struct net *net, const char *name) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 606 | { | 
 | 607 | 	struct hlist_node *p; | 
| Eric Dumazet | 0bd8d53 | 2009-10-30 01:40:11 -0700 | [diff] [blame] | 608 | 	struct net_device *dev; | 
 | 609 | 	struct hlist_head *head = dev_name_hash(net, name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 610 |  | 
| Eric Dumazet | 0bd8d53 | 2009-10-30 01:40:11 -0700 | [diff] [blame] | 611 | 	hlist_for_each_entry(dev, p, head, name_hlist) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 612 | 		if (!strncmp(dev->name, name, IFNAMSIZ)) | 
 | 613 | 			return dev; | 
| Eric Dumazet | 0bd8d53 | 2009-10-30 01:40:11 -0700 | [diff] [blame] | 614 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 615 | 	return NULL; | 
 | 616 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 617 | EXPORT_SYMBOL(__dev_get_by_name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 618 |  | 
 | 619 | /** | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 620 |  *	dev_get_by_name_rcu	- find a device by its name | 
 | 621 |  *	@net: the applicable net namespace | 
 | 622 |  *	@name: name to find | 
 | 623 |  * | 
 | 624 |  *	Find an interface by name. | 
 | 625 |  *	If the name is found a pointer to the device is returned. | 
 | 626 |  * 	If the name is not found then %NULL is returned. | 
 | 627 |  *	The reference counters are not incremented so the caller must be | 
 | 628 |  *	careful with locks. The caller must hold RCU lock. | 
 | 629 |  */ | 
 | 630 |  | 
 | 631 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) | 
 | 632 | { | 
 | 633 | 	struct hlist_node *p; | 
 | 634 | 	struct net_device *dev; | 
 | 635 | 	struct hlist_head *head = dev_name_hash(net, name); | 
 | 636 |  | 
 | 637 | 	hlist_for_each_entry_rcu(dev, p, head, name_hlist) | 
 | 638 | 		if (!strncmp(dev->name, name, IFNAMSIZ)) | 
 | 639 | 			return dev; | 
 | 640 |  | 
 | 641 | 	return NULL; | 
 | 642 | } | 
 | 643 | EXPORT_SYMBOL(dev_get_by_name_rcu); | 
 | 644 |  | 
 | 645 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 646 |  *	dev_get_by_name		- find a device by its name | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 647 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 648 |  *	@name: name to find | 
 | 649 |  * | 
 | 650 |  *	Find an interface by name. This can be called from any | 
 | 651 |  *	context and does its own locking. The returned handle has | 
 | 652 |  *	the usage count incremented and the caller must use dev_put() to | 
 | 653 |  *	release it when it is no longer needed. %NULL is returned if no | 
 | 654 |  *	matching device is found. | 
 | 655 |  */ | 
 | 656 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 657 | struct net_device *dev_get_by_name(struct net *net, const char *name) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 658 | { | 
 | 659 | 	struct net_device *dev; | 
 | 660 |  | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 661 | 	rcu_read_lock(); | 
 | 662 | 	dev = dev_get_by_name_rcu(net, name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 663 | 	if (dev) | 
 | 664 | 		dev_hold(dev); | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 665 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 666 | 	return dev; | 
 | 667 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 668 | EXPORT_SYMBOL(dev_get_by_name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 669 |  | 
 | 670 | /** | 
 | 671 |  *	__dev_get_by_index - find a device by its ifindex | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 672 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 673 |  *	@ifindex: index of device | 
 | 674 |  * | 
 | 675 |  *	Search for an interface by index. Returns %NULL if the device | 
 | 676 |  *	is not found or a pointer to the device. The device has not | 
 | 677 |  *	had its reference counter increased so the caller must be careful | 
 | 678 |  *	about locking. The caller must hold either the RTNL semaphore | 
 | 679 |  *	or @dev_base_lock. | 
 | 680 |  */ | 
 | 681 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 682 | struct net_device *__dev_get_by_index(struct net *net, int ifindex) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 683 | { | 
 | 684 | 	struct hlist_node *p; | 
| Eric Dumazet | 0bd8d53 | 2009-10-30 01:40:11 -0700 | [diff] [blame] | 685 | 	struct net_device *dev; | 
 | 686 | 	struct hlist_head *head = dev_index_hash(net, ifindex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 687 |  | 
| Eric Dumazet | 0bd8d53 | 2009-10-30 01:40:11 -0700 | [diff] [blame] | 688 | 	hlist_for_each_entry(dev, p, head, index_hlist) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 689 | 		if (dev->ifindex == ifindex) | 
 | 690 | 			return dev; | 
| Eric Dumazet | 0bd8d53 | 2009-10-30 01:40:11 -0700 | [diff] [blame] | 691 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 692 | 	return NULL; | 
 | 693 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 694 | EXPORT_SYMBOL(__dev_get_by_index); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 695 |  | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 696 | /** | 
 | 697 |  *	dev_get_by_index_rcu - find a device by its ifindex | 
 | 698 |  *	@net: the applicable net namespace | 
 | 699 |  *	@ifindex: index of device | 
 | 700 |  * | 
 | 701 |  *	Search for an interface by index. Returns %NULL if the device | 
 | 702 |  *	is not found or a pointer to the device. The device has not | 
 | 703 |  *	had its reference counter increased so the caller must be careful | 
 | 704 |  *	about locking. The caller must hold RCU lock. | 
 | 705 |  */ | 
 | 706 |  | 
 | 707 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) | 
 | 708 | { | 
 | 709 | 	struct hlist_node *p; | 
 | 710 | 	struct net_device *dev; | 
 | 711 | 	struct hlist_head *head = dev_index_hash(net, ifindex); | 
 | 712 |  | 
 | 713 | 	hlist_for_each_entry_rcu(dev, p, head, index_hlist) | 
 | 714 | 		if (dev->ifindex == ifindex) | 
 | 715 | 			return dev; | 
 | 716 |  | 
 | 717 | 	return NULL; | 
 | 718 | } | 
 | 719 | EXPORT_SYMBOL(dev_get_by_index_rcu); | 
 | 720 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 721 |  | 
 | 722 | /** | 
 | 723 |  *	dev_get_by_index - find a device by its ifindex | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 724 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 725 |  *	@ifindex: index of device | 
 | 726 |  * | 
 | 727 |  *	Search for an interface by index. Returns NULL if the device | 
 | 728 |  *	is not found or a pointer to the device. The device returned has | 
 | 729 |  *	had a reference added and the pointer is safe until the user calls | 
 | 730 |  *	dev_put to indicate they have finished with it. | 
 | 731 |  */ | 
 | 732 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 733 | struct net_device *dev_get_by_index(struct net *net, int ifindex) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 734 | { | 
 | 735 | 	struct net_device *dev; | 
 | 736 |  | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 737 | 	rcu_read_lock(); | 
 | 738 | 	dev = dev_get_by_index_rcu(net, ifindex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 739 | 	if (dev) | 
 | 740 | 		dev_hold(dev); | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 741 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 742 | 	return dev; | 
 | 743 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 744 | EXPORT_SYMBOL(dev_get_by_index); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 745 |  | 
 | 746 | /** | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 747 |  *	dev_getbyhwaddr_rcu - find a device by its hardware address | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 748 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 749 |  *	@type: media type of device | 
 | 750 |  *	@ha: hardware address | 
 | 751 |  * | 
 | 752 |  *	Search for an interface by MAC address. Returns NULL if the device | 
| Eric Dumazet | c506653 | 2011-01-24 13:16:16 -0800 | [diff] [blame] | 753 |  *	is not found or a pointer to the device. | 
 | 754 |  *	The caller must hold RCU or RTNL. | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 755 |  *	The returned device has not had its ref count increased | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 756 |  *	and the caller must therefore be careful about locking | 
 | 757 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 758 |  */ | 
 | 759 |  | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 760 | struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | 
 | 761 | 				       const char *ha) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 762 | { | 
 | 763 | 	struct net_device *dev; | 
 | 764 |  | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 765 | 	for_each_netdev_rcu(net, dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 766 | 		if (dev->type == type && | 
 | 767 | 		    !memcmp(dev->dev_addr, ha, dev->addr_len)) | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 768 | 			return dev; | 
 | 769 |  | 
 | 770 | 	return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 771 | } | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 772 | EXPORT_SYMBOL(dev_getbyhwaddr_rcu); | 
| Jochen Friedrich | cf309e3 | 2005-09-22 04:44:55 -0300 | [diff] [blame] | 773 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 774 | struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) | 
| Patrick McHardy | 4e9cac2 | 2007-05-03 03:28:13 -0700 | [diff] [blame] | 775 | { | 
 | 776 | 	struct net_device *dev; | 
 | 777 |  | 
 | 778 | 	ASSERT_RTNL(); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 779 | 	for_each_netdev(net, dev) | 
| Patrick McHardy | 4e9cac2 | 2007-05-03 03:28:13 -0700 | [diff] [blame] | 780 | 		if (dev->type == type) | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 781 | 			return dev; | 
 | 782 |  | 
 | 783 | 	return NULL; | 
| Patrick McHardy | 4e9cac2 | 2007-05-03 03:28:13 -0700 | [diff] [blame] | 784 | } | 
| Patrick McHardy | 4e9cac2 | 2007-05-03 03:28:13 -0700 | [diff] [blame] | 785 | EXPORT_SYMBOL(__dev_getfirstbyhwtype); | 
 | 786 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 787 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 788 | { | 
| Eric Dumazet | 99fe3c3 | 2010-03-18 11:27:25 +0000 | [diff] [blame] | 789 | 	struct net_device *dev, *ret = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 790 |  | 
| Eric Dumazet | 99fe3c3 | 2010-03-18 11:27:25 +0000 | [diff] [blame] | 791 | 	rcu_read_lock(); | 
 | 792 | 	for_each_netdev_rcu(net, dev) | 
 | 793 | 		if (dev->type == type) { | 
 | 794 | 			dev_hold(dev); | 
 | 795 | 			ret = dev; | 
 | 796 | 			break; | 
 | 797 | 		} | 
 | 798 | 	rcu_read_unlock(); | 
 | 799 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 800 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 801 | EXPORT_SYMBOL(dev_getfirstbyhwtype); | 
 | 802 |  | 
 | 803 | /** | 
| Eric Dumazet | bb69ae0 | 2010-06-07 11:42:13 +0000 | [diff] [blame] | 804 |  *	dev_get_by_flags_rcu - find any device with given flags | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 805 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 806 |  *	@if_flags: IFF_* values | 
 | 807 |  *	@mask: bitmask of bits in if_flags to check | 
 | 808 |  * | 
 | 809 |  *	Search for any interface with the given flags. Returns NULL if a device | 
| Eric Dumazet | bb69ae0 | 2010-06-07 11:42:13 +0000 | [diff] [blame] | 810 |  *	is not found or a pointer to the device. Must be called inside | 
 | 811 |  *	rcu_read_lock(), and result refcount is unchanged. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 812 |  */ | 
 | 813 |  | 
| Eric Dumazet | bb69ae0 | 2010-06-07 11:42:13 +0000 | [diff] [blame] | 814 | struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 815 | 				    unsigned short mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 816 | { | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 817 | 	struct net_device *dev, *ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 818 |  | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 819 | 	ret = NULL; | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 820 | 	for_each_netdev_rcu(net, dev) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 821 | 		if (((dev->flags ^ if_flags) & mask) == 0) { | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 822 | 			ret = dev; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 823 | 			break; | 
 | 824 | 		} | 
 | 825 | 	} | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 826 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 827 | } | 
| Eric Dumazet | bb69ae0 | 2010-06-07 11:42:13 +0000 | [diff] [blame] | 828 | EXPORT_SYMBOL(dev_get_by_flags_rcu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 829 |  | 
 | 830 | /** | 
 | 831 |  *	dev_valid_name - check if name is okay for network device | 
 | 832 |  *	@name: name string | 
 | 833 |  * | 
 | 834 |  *	Network device names need to be valid file names to | 
| David S. Miller | c7fa9d1 | 2006-08-15 16:34:13 -0700 | [diff] [blame] | 835 |  *	to allow sysfs to work.  We also disallow any kind of | 
 | 836 |  *	whitespace. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 837 |  */ | 
| Mitch Williams | c2373ee | 2005-11-09 10:34:45 -0800 | [diff] [blame] | 838 | int dev_valid_name(const char *name) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 | { | 
| David S. Miller | c7fa9d1 | 2006-08-15 16:34:13 -0700 | [diff] [blame] | 840 | 	if (*name == '\0') | 
 | 841 | 		return 0; | 
| Stephen Hemminger | b6fe17d | 2006-08-29 17:06:13 -0700 | [diff] [blame] | 842 | 	if (strlen(name) >= IFNAMSIZ) | 
 | 843 | 		return 0; | 
| David S. Miller | c7fa9d1 | 2006-08-15 16:34:13 -0700 | [diff] [blame] | 844 | 	if (!strcmp(name, ".") || !strcmp(name, "..")) | 
 | 845 | 		return 0; | 
 | 846 |  | 
 | 847 | 	while (*name) { | 
 | 848 | 		if (*name == '/' || isspace(*name)) | 
 | 849 | 			return 0; | 
 | 850 | 		name++; | 
 | 851 | 	} | 
 | 852 | 	return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 854 | EXPORT_SYMBOL(dev_valid_name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 855 |  | 
 | 856 | /** | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 857 |  *	__dev_alloc_name - allocate a name for a device | 
 | 858 |  *	@net: network namespace to allocate the device name in | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 859 |  *	@name: name format string | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 860 |  *	@buf:  scratch buffer and result name string | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 861 |  * | 
 | 862 |  *	Passed a format string - eg "lt%d" it will try and find a suitable | 
| Stephen Hemminger | 3041a06 | 2006-05-26 13:25:24 -0700 | [diff] [blame] | 863 |  *	id. It scans list of devices to build up a free map, then chooses | 
 | 864 |  *	the first empty slot. The caller must hold the dev_base or rtnl lock | 
 | 865 |  *	while allocating the name and adding the device in order to avoid | 
 | 866 |  *	duplicates. | 
 | 867 |  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms). | 
 | 868 |  *	Returns the number of the unit assigned or a negative errno code. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 869 |  */ | 
 | 870 |  | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 871 | static int __dev_alloc_name(struct net *net, const char *name, char *buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | { | 
 | 873 | 	int i = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | 	const char *p; | 
 | 875 | 	const int max_netdevices = 8*PAGE_SIZE; | 
| Stephen Hemminger | cfcabdc | 2007-10-09 01:59:42 -0700 | [diff] [blame] | 876 | 	unsigned long *inuse; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 | 	struct net_device *d; | 
 | 878 |  | 
 | 879 | 	p = strnchr(name, IFNAMSIZ-1, '%'); | 
 | 880 | 	if (p) { | 
 | 881 | 		/* | 
 | 882 | 		 * Verify the string as this thing may have come from | 
 | 883 | 		 * the user.  There must be either one "%d" and no other "%" | 
 | 884 | 		 * characters. | 
 | 885 | 		 */ | 
 | 886 | 		if (p[1] != 'd' || strchr(p + 2, '%')) | 
 | 887 | 			return -EINVAL; | 
 | 888 |  | 
 | 889 | 		/* Use one page as a bit array of possible slots */ | 
| Stephen Hemminger | cfcabdc | 2007-10-09 01:59:42 -0700 | [diff] [blame] | 890 | 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | 		if (!inuse) | 
 | 892 | 			return -ENOMEM; | 
 | 893 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 894 | 		for_each_netdev(net, d) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | 			if (!sscanf(d->name, name, &i)) | 
 | 896 | 				continue; | 
 | 897 | 			if (i < 0 || i >= max_netdevices) | 
 | 898 | 				continue; | 
 | 899 |  | 
 | 900 | 			/*  avoid cases where sscanf is not exact inverse of printf */ | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 901 | 			snprintf(buf, IFNAMSIZ, name, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 902 | 			if (!strncmp(buf, d->name, IFNAMSIZ)) | 
 | 903 | 				set_bit(i, inuse); | 
 | 904 | 		} | 
 | 905 |  | 
 | 906 | 		i = find_first_zero_bit(inuse, max_netdevices); | 
 | 907 | 		free_page((unsigned long) inuse); | 
 | 908 | 	} | 
 | 909 |  | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 910 | 	if (buf != name) | 
 | 911 | 		snprintf(buf, IFNAMSIZ, name, i); | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 912 | 	if (!__dev_get_by_name(net, buf)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | 		return i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 |  | 
 | 915 | 	/* It is possible to run out of possible slots | 
 | 916 | 	 * when the name is long and there isn't enough space left | 
 | 917 | 	 * for the digits, or if all bits are used. | 
 | 918 | 	 */ | 
 | 919 | 	return -ENFILE; | 
 | 920 | } | 
 | 921 |  | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 922 | /** | 
 | 923 |  *	dev_alloc_name - allocate a name for a device | 
 | 924 |  *	@dev: device | 
 | 925 |  *	@name: name format string | 
 | 926 |  * | 
 | 927 |  *	Passed a format string - eg "lt%d" it will try and find a suitable | 
 | 928 |  *	id. It scans list of devices to build up a free map, then chooses | 
 | 929 |  *	the first empty slot. The caller must hold the dev_base or rtnl lock | 
 | 930 |  *	while allocating the name and adding the device in order to avoid | 
 | 931 |  *	duplicates. | 
 | 932 |  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms). | 
 | 933 |  *	Returns the number of the unit assigned or a negative errno code. | 
 | 934 |  */ | 
 | 935 |  | 
 | 936 | int dev_alloc_name(struct net_device *dev, const char *name) | 
 | 937 | { | 
 | 938 | 	char buf[IFNAMSIZ]; | 
 | 939 | 	struct net *net; | 
 | 940 | 	int ret; | 
 | 941 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 942 | 	BUG_ON(!dev_net(dev)); | 
 | 943 | 	net = dev_net(dev); | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 944 | 	ret = __dev_alloc_name(net, name, buf); | 
 | 945 | 	if (ret >= 0) | 
 | 946 | 		strlcpy(dev->name, buf, IFNAMSIZ); | 
 | 947 | 	return ret; | 
 | 948 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 949 | EXPORT_SYMBOL(dev_alloc_name); | 
| Eric W. Biederman | b267b17 | 2007-09-12 13:48:45 +0200 | [diff] [blame] | 950 |  | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 951 | static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 952 | { | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 953 | 	struct net *net; | 
 | 954 |  | 
 | 955 | 	BUG_ON(!dev_net(dev)); | 
 | 956 | 	net = dev_net(dev); | 
 | 957 |  | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 958 | 	if (!dev_valid_name(name)) | 
 | 959 | 		return -EINVAL; | 
 | 960 |  | 
 | 961 | 	if (fmt && strchr(name, '%')) | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 962 | 		return dev_alloc_name(dev, name); | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 963 | 	else if (__dev_get_by_name(net, name)) | 
 | 964 | 		return -EEXIST; | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 965 | 	else if (dev->name != name) | 
 | 966 | 		strlcpy(dev->name, name, IFNAMSIZ); | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 967 |  | 
 | 968 | 	return 0; | 
 | 969 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 |  | 
 | 971 | /** | 
 | 972 |  *	dev_change_name - change name of a device | 
 | 973 |  *	@dev: device | 
 | 974 |  *	@newname: name (or format string) must be at least IFNAMSIZ | 
 | 975 |  * | 
 | 976 |  *	Change name of a device, can pass format strings "eth%d". | 
 | 977 |  *	for wildcarding. | 
 | 978 |  */ | 
| Stephen Hemminger | cf04a4c7 | 2008-09-30 02:22:14 -0700 | [diff] [blame] | 979 | int dev_change_name(struct net_device *dev, const char *newname) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 980 | { | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 981 | 	char oldname[IFNAMSIZ]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | 	int err = 0; | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 983 | 	int ret; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 984 | 	struct net *net; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 |  | 
 | 986 | 	ASSERT_RTNL(); | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 987 | 	BUG_ON(!dev_net(dev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 989 | 	net = dev_net(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 990 | 	if (dev->flags & IFF_UP) | 
 | 991 | 		return -EBUSY; | 
 | 992 |  | 
| Stephen Hemminger | c8d90dc | 2007-10-26 03:53:42 -0700 | [diff] [blame] | 993 | 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) | 
 | 994 | 		return 0; | 
 | 995 |  | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 996 | 	memcpy(oldname, dev->name, IFNAMSIZ); | 
 | 997 |  | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 998 | 	err = dev_get_valid_name(dev, newname, 1); | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 999 | 	if (err < 0) | 
 | 1000 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 |  | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1002 | rollback: | 
| Eric W. Biederman | a1b3f59 | 2010-05-04 17:36:49 -0700 | [diff] [blame] | 1003 | 	ret = device_rename(&dev->dev, dev->name); | 
 | 1004 | 	if (ret) { | 
 | 1005 | 		memcpy(dev->name, oldname, IFNAMSIZ); | 
 | 1006 | 		return ret; | 
| Stephen Hemminger | dcc9977 | 2008-05-14 22:33:38 -0700 | [diff] [blame] | 1007 | 	} | 
| Herbert Xu | 7f988ea | 2007-07-30 16:35:46 -0700 | [diff] [blame] | 1008 |  | 
 | 1009 | 	write_lock_bh(&dev_base_lock); | 
| Eric W. Biederman | 9274982 | 2007-04-03 00:07:30 -0600 | [diff] [blame] | 1010 | 	hlist_del(&dev->name_hlist); | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 1011 | 	write_unlock_bh(&dev_base_lock); | 
 | 1012 |  | 
 | 1013 | 	synchronize_rcu(); | 
 | 1014 |  | 
 | 1015 | 	write_lock_bh(&dev_base_lock); | 
 | 1016 | 	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); | 
| Herbert Xu | 7f988ea | 2007-07-30 16:35:46 -0700 | [diff] [blame] | 1017 | 	write_unlock_bh(&dev_base_lock); | 
 | 1018 |  | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 1019 | 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1020 | 	ret = notifier_to_errno(ret); | 
 | 1021 |  | 
 | 1022 | 	if (ret) { | 
| Eric Dumazet | 91e9c07b | 2009-11-15 23:30:24 +0000 | [diff] [blame] | 1023 | 		/* err >= 0 after dev_alloc_name() or stores the first errno */ | 
 | 1024 | 		if (err >= 0) { | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1025 | 			err = ret; | 
 | 1026 | 			memcpy(dev->name, oldname, IFNAMSIZ); | 
 | 1027 | 			goto rollback; | 
| Eric Dumazet | 91e9c07b | 2009-11-15 23:30:24 +0000 | [diff] [blame] | 1028 | 		} else { | 
 | 1029 | 			printk(KERN_ERR | 
 | 1030 | 			       "%s: name change rollback failed: %d.\n", | 
 | 1031 | 			       dev->name, ret); | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1032 | 		} | 
 | 1033 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 |  | 
 | 1035 | 	return err; | 
 | 1036 | } | 
 | 1037 |  | 
 | 1038 | /** | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 1039 |  *	dev_set_alias - change ifalias of a device | 
 | 1040 |  *	@dev: device | 
 | 1041 |  *	@alias: name up to IFALIASZ | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 1042 |  *	@len: limit of bytes to copy from info | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 1043 |  * | 
 | 1044 |  *	Set ifalias for a device, | 
 | 1045 |  */ | 
 | 1046 | int dev_set_alias(struct net_device *dev, const char *alias, size_t len) | 
 | 1047 | { | 
 | 1048 | 	ASSERT_RTNL(); | 
 | 1049 |  | 
 | 1050 | 	if (len >= IFALIASZ) | 
 | 1051 | 		return -EINVAL; | 
 | 1052 |  | 
| Oliver Hartkopp | 96ca4a2 | 2008-09-23 21:23:19 -0700 | [diff] [blame] | 1053 | 	if (!len) { | 
 | 1054 | 		if (dev->ifalias) { | 
 | 1055 | 			kfree(dev->ifalias); | 
 | 1056 | 			dev->ifalias = NULL; | 
 | 1057 | 		} | 
 | 1058 | 		return 0; | 
 | 1059 | 	} | 
 | 1060 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1061 | 	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 1062 | 	if (!dev->ifalias) | 
 | 1063 | 		return -ENOMEM; | 
 | 1064 |  | 
 | 1065 | 	strlcpy(dev->ifalias, alias, len+1); | 
 | 1066 | 	return len; | 
 | 1067 | } | 
 | 1068 |  | 
 | 1069 |  | 
 | 1070 | /** | 
| Stephen Hemminger | 3041a06 | 2006-05-26 13:25:24 -0700 | [diff] [blame] | 1071 |  *	netdev_features_change - device changes features | 
| Stephen Hemminger | d8a33ac | 2005-05-29 14:13:47 -0700 | [diff] [blame] | 1072 |  *	@dev: device to cause notification | 
 | 1073 |  * | 
 | 1074 |  *	Called to indicate a device has changed features. | 
 | 1075 |  */ | 
 | 1076 | void netdev_features_change(struct net_device *dev) | 
 | 1077 | { | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 1078 | 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); | 
| Stephen Hemminger | d8a33ac | 2005-05-29 14:13:47 -0700 | [diff] [blame] | 1079 | } | 
 | 1080 | EXPORT_SYMBOL(netdev_features_change); | 
 | 1081 |  | 
 | 1082 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 |  *	netdev_state_change - device changes state | 
 | 1084 |  *	@dev: device to cause notification | 
 | 1085 |  * | 
 | 1086 |  *	Called to indicate a device has changed state. This function calls | 
 | 1087 |  *	the notifier chains for netdev_chain and sends a NEWLINK message | 
 | 1088 |  *	to the routing socket. | 
 | 1089 |  */ | 
 | 1090 | void netdev_state_change(struct net_device *dev) | 
 | 1091 | { | 
 | 1092 | 	if (dev->flags & IFF_UP) { | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 1093 | 		call_netdevice_notifiers(NETDEV_CHANGE, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0); | 
 | 1095 | 	} | 
 | 1096 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1097 | EXPORT_SYMBOL(netdev_state_change); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 |  | 
| Jiri Pirko | 3ca5b40 | 2010-03-10 10:29:35 +0000 | [diff] [blame] | 1099 | int netdev_bonding_change(struct net_device *dev, unsigned long event) | 
| Or Gerlitz | c1da4ac | 2008-06-13 18:12:00 -0700 | [diff] [blame] | 1100 | { | 
| Jiri Pirko | 3ca5b40 | 2010-03-10 10:29:35 +0000 | [diff] [blame] | 1101 | 	return call_netdevice_notifiers(event, dev); | 
| Or Gerlitz | c1da4ac | 2008-06-13 18:12:00 -0700 | [diff] [blame] | 1102 | } | 
 | 1103 | EXPORT_SYMBOL(netdev_bonding_change); | 
 | 1104 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | /** | 
 | 1106 |  *	dev_load 	- load a network module | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 1107 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 |  *	@name: name of interface | 
 | 1109 |  * | 
 | 1110 |  *	If a network interface is not present and the process has suitable | 
 | 1111 |  *	privileges this function loads the module. If module loading is not | 
 | 1112 |  *	available in this kernel then it becomes a nop. | 
 | 1113 |  */ | 
 | 1114 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1115 | void dev_load(struct net *net, const char *name) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | { | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1117 | 	struct net_device *dev; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 |  | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 1119 | 	rcu_read_lock(); | 
 | 1120 | 	dev = dev_get_by_name_rcu(net, name); | 
 | 1121 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 |  | 
| Eric Paris | a8f80e8 | 2009-08-13 09:44:51 -0400 | [diff] [blame] | 1123 | 	if (!dev && capable(CAP_NET_ADMIN)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | 		request_module("%s", name); | 
 | 1125 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1126 | EXPORT_SYMBOL(dev_load); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 |  | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1128 | static int __dev_open(struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | { | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1130 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
| Johannes Berg | 3b8bcfd | 2009-05-30 01:39:53 +0200 | [diff] [blame] | 1131 | 	int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 |  | 
| Ben Hutchings | e46b66b | 2008-05-08 02:53:17 -0700 | [diff] [blame] | 1133 | 	ASSERT_RTNL(); | 
 | 1134 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | 	/* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | 	 *	Is it even present? | 
 | 1137 | 	 */ | 
 | 1138 | 	if (!netif_device_present(dev)) | 
 | 1139 | 		return -ENODEV; | 
 | 1140 |  | 
| Johannes Berg | 3b8bcfd | 2009-05-30 01:39:53 +0200 | [diff] [blame] | 1141 | 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); | 
 | 1142 | 	ret = notifier_to_errno(ret); | 
 | 1143 | 	if (ret) | 
 | 1144 | 		return ret; | 
 | 1145 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | 	/* | 
 | 1147 | 	 *	Call device private open method | 
 | 1148 | 	 */ | 
 | 1149 | 	set_bit(__LINK_STATE_START, &dev->state); | 
| Jeff Garzik | bada339 | 2007-10-23 20:19:37 -0700 | [diff] [blame] | 1150 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1151 | 	if (ops->ndo_validate_addr) | 
 | 1152 | 		ret = ops->ndo_validate_addr(dev); | 
| Jeff Garzik | bada339 | 2007-10-23 20:19:37 -0700 | [diff] [blame] | 1153 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1154 | 	if (!ret && ops->ndo_open) | 
 | 1155 | 		ret = ops->ndo_open(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 |  | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1157 | 	/* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | 	 *	If it went open OK then: | 
 | 1159 | 	 */ | 
 | 1160 |  | 
| Jeff Garzik | bada339 | 2007-10-23 20:19:37 -0700 | [diff] [blame] | 1161 | 	if (ret) | 
 | 1162 | 		clear_bit(__LINK_STATE_START, &dev->state); | 
 | 1163 | 	else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1164 | 		/* | 
 | 1165 | 		 *	Set the flags. | 
 | 1166 | 		 */ | 
 | 1167 | 		dev->flags |= IFF_UP; | 
 | 1168 |  | 
 | 1169 | 		/* | 
| Dan Williams | 649274d | 2009-01-11 00:20:39 -0800 | [diff] [blame] | 1170 | 		 *	Enable NET_DMA | 
 | 1171 | 		 */ | 
| David S. Miller | b4bd07c | 2009-02-06 22:06:43 -0800 | [diff] [blame] | 1172 | 		net_dmaengine_get(); | 
| Dan Williams | 649274d | 2009-01-11 00:20:39 -0800 | [diff] [blame] | 1173 |  | 
 | 1174 | 		/* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | 		 *	Initialize multicasting status | 
 | 1176 | 		 */ | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 1177 | 		dev_set_rx_mode(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 |  | 
 | 1179 | 		/* | 
 | 1180 | 		 *	Wakeup transmit queue engine | 
 | 1181 | 		 */ | 
 | 1182 | 		dev_activate(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | 	} | 
| Jeff Garzik | bada339 | 2007-10-23 20:19:37 -0700 | [diff] [blame] | 1184 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 | 	return ret; | 
 | 1186 | } | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1187 |  | 
 | 1188 | /** | 
 | 1189 |  *	dev_open	- prepare an interface for use. | 
 | 1190 |  *	@dev:	device to open | 
 | 1191 |  * | 
 | 1192 |  *	Takes a device from down to up state. The device's private open | 
 | 1193 |  *	function is invoked and then the multicast lists are loaded. Finally | 
 | 1194 |  *	the device is moved into the up state and a %NETDEV_UP message is | 
 | 1195 |  *	sent to the netdev notifier chain. | 
 | 1196 |  * | 
 | 1197 |  *	Calling this function on an active interface is a nop. On a failure | 
 | 1198 |  *	a negative errno code is returned. | 
 | 1199 |  */ | 
 | 1200 | int dev_open(struct net_device *dev) | 
 | 1201 | { | 
 | 1202 | 	int ret; | 
 | 1203 |  | 
 | 1204 | 	/* | 
 | 1205 | 	 *	Is it already up? | 
 | 1206 | 	 */ | 
 | 1207 | 	if (dev->flags & IFF_UP) | 
 | 1208 | 		return 0; | 
 | 1209 |  | 
 | 1210 | 	/* | 
 | 1211 | 	 *	Open device | 
 | 1212 | 	 */ | 
 | 1213 | 	ret = __dev_open(dev); | 
 | 1214 | 	if (ret < 0) | 
 | 1215 | 		return ret; | 
 | 1216 |  | 
 | 1217 | 	/* | 
 | 1218 | 	 *	... and announce new interface. | 
 | 1219 | 	 */ | 
 | 1220 | 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | 
 | 1221 | 	call_netdevice_notifiers(NETDEV_UP, dev); | 
 | 1222 |  | 
 | 1223 | 	return ret; | 
 | 1224 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1225 | EXPORT_SYMBOL(dev_open); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1227 | static int __dev_close_many(struct list_head *head) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | { | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1229 | 	struct net_device *dev; | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1230 |  | 
| Ben Hutchings | e46b66b | 2008-05-08 02:53:17 -0700 | [diff] [blame] | 1231 | 	ASSERT_RTNL(); | 
| David S. Miller | 9d5010d | 2007-09-12 14:33:25 +0200 | [diff] [blame] | 1232 | 	might_sleep(); | 
 | 1233 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1234 | 	list_for_each_entry(dev, head, unreg_list) { | 
 | 1235 | 		/* | 
 | 1236 | 		 *	Tell people we are going down, so that they can | 
 | 1237 | 		 *	prepare to death, when device is still operating. | 
 | 1238 | 		 */ | 
 | 1239 | 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1240 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1241 | 		clear_bit(__LINK_STATE_START, &dev->state); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1243 | 		/* Synchronize to scheduled poll. We cannot touch poll list, it | 
 | 1244 | 		 * can be even on different cpu. So just clear netif_running(). | 
 | 1245 | 		 * | 
 | 1246 | 		 * dev->stop() will invoke napi_disable() on all of it's | 
 | 1247 | 		 * napi_struct instances on this device. | 
 | 1248 | 		 */ | 
 | 1249 | 		smp_mb__after_clear_bit(); /* Commit netif_running(). */ | 
 | 1250 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1252 | 	dev_deactivate_many(head); | 
 | 1253 |  | 
 | 1254 | 	list_for_each_entry(dev, head, unreg_list) { | 
 | 1255 | 		const struct net_device_ops *ops = dev->netdev_ops; | 
 | 1256 |  | 
 | 1257 | 		/* | 
 | 1258 | 		 *	Call the device specific close. This cannot fail. | 
 | 1259 | 		 *	Only if device is UP | 
 | 1260 | 		 * | 
 | 1261 | 		 *	We allow it to be called even after a DETACH hot-plug | 
 | 1262 | 		 *	event. | 
 | 1263 | 		 */ | 
 | 1264 | 		if (ops->ndo_stop) | 
 | 1265 | 			ops->ndo_stop(dev); | 
 | 1266 |  | 
 | 1267 | 		/* | 
 | 1268 | 		 *	Device is now down. | 
 | 1269 | 		 */ | 
 | 1270 |  | 
 | 1271 | 		dev->flags &= ~IFF_UP; | 
 | 1272 |  | 
 | 1273 | 		/* | 
 | 1274 | 		 *	Shutdown NET_DMA | 
 | 1275 | 		 */ | 
 | 1276 | 		net_dmaengine_put(); | 
 | 1277 | 	} | 
 | 1278 |  | 
 | 1279 | 	return 0; | 
 | 1280 | } | 
 | 1281 |  | 
 | 1282 | static int __dev_close(struct net_device *dev) | 
 | 1283 | { | 
 | 1284 | 	LIST_HEAD(single); | 
 | 1285 |  | 
 | 1286 | 	list_add(&dev->unreg_list, &single); | 
 | 1287 | 	return __dev_close_many(&single); | 
 | 1288 | } | 
 | 1289 |  | 
| Eric Dumazet | 3fbd875 | 2011-01-19 21:23:22 +0000 | [diff] [blame] | 1290 | static int dev_close_many(struct list_head *head) | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1291 | { | 
 | 1292 | 	struct net_device *dev, *tmp; | 
 | 1293 | 	LIST_HEAD(tmp_list); | 
 | 1294 |  | 
 | 1295 | 	list_for_each_entry_safe(dev, tmp, head, unreg_list) | 
 | 1296 | 		if (!(dev->flags & IFF_UP)) | 
 | 1297 | 			list_move(&dev->unreg_list, &tmp_list); | 
 | 1298 |  | 
 | 1299 | 	__dev_close_many(head); | 
| Matti Linnanvuori | d8b2a4d | 2008-02-12 23:10:11 -0800 | [diff] [blame] | 1300 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | 	/* | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1302 | 	 * Tell people we are down | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | 	 */ | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1304 | 	list_for_each_entry(dev, head, unreg_list) { | 
 | 1305 | 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | 
 | 1306 | 		call_netdevice_notifiers(NETDEV_DOWN, dev); | 
 | 1307 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1309 | 	/* rollback_registered_many needs the complete original list */ | 
 | 1310 | 	list_splice(&tmp_list, head); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 | 	return 0; | 
 | 1312 | } | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1313 |  | 
 | 1314 | /** | 
 | 1315 |  *	dev_close - shutdown an interface. | 
 | 1316 |  *	@dev: device to shutdown | 
 | 1317 |  * | 
 | 1318 |  *	This function moves an active device into down state. A | 
 | 1319 |  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device | 
 | 1320 |  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier | 
 | 1321 |  *	chain. | 
 | 1322 |  */ | 
 | 1323 | int dev_close(struct net_device *dev) | 
 | 1324 | { | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1325 | 	LIST_HEAD(single); | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1326 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 1327 | 	list_add(&dev->unreg_list, &single); | 
 | 1328 | 	dev_close_many(&single); | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1329 |  | 
 | 1330 | 	return 0; | 
 | 1331 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1332 | EXPORT_SYMBOL(dev_close); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 |  | 
 | 1334 |  | 
| Ben Hutchings | 0187bdf | 2008-06-19 16:15:47 -0700 | [diff] [blame] | 1335 | /** | 
 | 1336 |  *	dev_disable_lro - disable Large Receive Offload on a device | 
 | 1337 |  *	@dev: device | 
 | 1338 |  * | 
 | 1339 |  *	Disable Large Receive Offload (LRO) on a net device.  Must be | 
 | 1340 |  *	called under RTNL.  This is needed if received packets may be | 
 | 1341 |  *	forwarded to another interface. | 
 | 1342 |  */ | 
 | 1343 | void dev_disable_lro(struct net_device *dev) | 
 | 1344 | { | 
 | 1345 | 	if (dev->ethtool_ops && dev->ethtool_ops->get_flags && | 
 | 1346 | 	    dev->ethtool_ops->set_flags) { | 
 | 1347 | 		u32 flags = dev->ethtool_ops->get_flags(dev); | 
 | 1348 | 		if (flags & ETH_FLAG_LRO) { | 
 | 1349 | 			flags &= ~ETH_FLAG_LRO; | 
 | 1350 | 			dev->ethtool_ops->set_flags(dev, flags); | 
 | 1351 | 		} | 
 | 1352 | 	} | 
 | 1353 | 	WARN_ON(dev->features & NETIF_F_LRO); | 
 | 1354 | } | 
 | 1355 | EXPORT_SYMBOL(dev_disable_lro); | 
 | 1356 |  | 
 | 1357 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1358 | static int dev_boot_phase = 1; | 
 | 1359 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | /* | 
 | 1361 |  *	Device change register/unregister. These are not inline or static | 
 | 1362 |  *	as we export them to the world. | 
 | 1363 |  */ | 
 | 1364 |  | 
 | 1365 | /** | 
 | 1366 |  *	register_netdevice_notifier - register a network notifier block | 
 | 1367 |  *	@nb: notifier | 
 | 1368 |  * | 
 | 1369 |  *	Register a notifier to be called when network device events occur. | 
 | 1370 |  *	The notifier passed is linked into the kernel structures and must | 
 | 1371 |  *	not be reused until it has been unregistered. A negative errno code | 
 | 1372 |  *	is returned on a failure. | 
 | 1373 |  * | 
 | 1374 |  * 	When registered all registration and up events are replayed | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1375 |  *	to the new notifier to allow device to have a race free | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 |  *	view of the network device list. | 
 | 1377 |  */ | 
 | 1378 |  | 
 | 1379 | int register_netdevice_notifier(struct notifier_block *nb) | 
 | 1380 | { | 
 | 1381 | 	struct net_device *dev; | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1382 | 	struct net_device *last; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1383 | 	struct net *net; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1384 | 	int err; | 
 | 1385 |  | 
 | 1386 | 	rtnl_lock(); | 
| Alan Stern | f07d5b9 | 2006-05-09 15:23:03 -0700 | [diff] [blame] | 1387 | 	err = raw_notifier_chain_register(&netdev_chain, nb); | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1388 | 	if (err) | 
 | 1389 | 		goto unlock; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1390 | 	if (dev_boot_phase) | 
 | 1391 | 		goto unlock; | 
 | 1392 | 	for_each_net(net) { | 
 | 1393 | 		for_each_netdev(net, dev) { | 
 | 1394 | 			err = nb->notifier_call(nb, NETDEV_REGISTER, dev); | 
 | 1395 | 			err = notifier_to_errno(err); | 
 | 1396 | 			if (err) | 
 | 1397 | 				goto rollback; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1399 | 			if (!(dev->flags & IFF_UP)) | 
 | 1400 | 				continue; | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1401 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1402 | 			nb->notifier_call(nb, NETDEV_UP, dev); | 
 | 1403 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | 	} | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1405 |  | 
 | 1406 | unlock: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1407 | 	rtnl_unlock(); | 
 | 1408 | 	return err; | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1409 |  | 
 | 1410 | rollback: | 
 | 1411 | 	last = dev; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1412 | 	for_each_net(net) { | 
 | 1413 | 		for_each_netdev(net, dev) { | 
 | 1414 | 			if (dev == last) | 
 | 1415 | 				break; | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1416 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1417 | 			if (dev->flags & IFF_UP) { | 
 | 1418 | 				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); | 
 | 1419 | 				nb->notifier_call(nb, NETDEV_DOWN, dev); | 
 | 1420 | 			} | 
 | 1421 | 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev); | 
| Eric W. Biederman | a5ee155 | 2009-11-29 15:45:58 +0000 | [diff] [blame] | 1422 | 			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1423 | 		} | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1424 | 	} | 
| Pavel Emelyanov | c67625a | 2007-11-14 15:53:16 -0800 | [diff] [blame] | 1425 |  | 
 | 1426 | 	raw_notifier_chain_unregister(&netdev_chain, nb); | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 1427 | 	goto unlock; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1429 | EXPORT_SYMBOL(register_netdevice_notifier); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 |  | 
 | 1431 | /** | 
 | 1432 |  *	unregister_netdevice_notifier - unregister a network notifier block | 
 | 1433 |  *	@nb: notifier | 
 | 1434 |  * | 
 | 1435 |  *	Unregister a notifier previously registered by | 
 | 1436 |  *	register_netdevice_notifier(). The notifier is unlinked into the | 
 | 1437 |  *	kernel structures and may then be reused. A negative errno code | 
 | 1438 |  *	is returned on a failure. | 
 | 1439 |  */ | 
 | 1440 |  | 
 | 1441 | int unregister_netdevice_notifier(struct notifier_block *nb) | 
 | 1442 | { | 
| Herbert Xu | 9f51495 | 2006-03-25 01:24:25 -0800 | [diff] [blame] | 1443 | 	int err; | 
 | 1444 |  | 
 | 1445 | 	rtnl_lock(); | 
| Alan Stern | f07d5b9 | 2006-05-09 15:23:03 -0700 | [diff] [blame] | 1446 | 	err = raw_notifier_chain_unregister(&netdev_chain, nb); | 
| Herbert Xu | 9f51495 | 2006-03-25 01:24:25 -0800 | [diff] [blame] | 1447 | 	rtnl_unlock(); | 
 | 1448 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1449 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1450 | EXPORT_SYMBOL(unregister_netdevice_notifier); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 |  | 
 | 1452 | /** | 
 | 1453 |  *	call_netdevice_notifiers - call all network notifier blocks | 
 | 1454 |  *      @val: value passed unmodified to notifier function | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 1455 |  *      @dev: net_device pointer passed unmodified to notifier function | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 |  * | 
 | 1457 |  *	Call all network notifier blocks.  Parameters and return value | 
| Alan Stern | f07d5b9 | 2006-05-09 15:23:03 -0700 | [diff] [blame] | 1458 |  *	are as for raw_notifier_call_chain(). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1459 |  */ | 
 | 1460 |  | 
| Eric W. Biederman | ad7379d | 2007-09-16 15:33:32 -0700 | [diff] [blame] | 1461 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | { | 
| Jiri Pirko | ab93047 | 2010-04-20 01:45:37 -0700 | [diff] [blame] | 1463 | 	ASSERT_RTNL(); | 
| Eric W. Biederman | ad7379d | 2007-09-16 15:33:32 -0700 | [diff] [blame] | 1464 | 	return raw_notifier_call_chain(&netdev_chain, val, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1465 | } | 
 | 1466 |  | 
 | 1467 | /* When > 0 there are consumers of rx skb time stamps */ | 
 | 1468 | static atomic_t netstamp_needed = ATOMIC_INIT(0); | 
 | 1469 |  | 
 | 1470 | void net_enable_timestamp(void) | 
 | 1471 | { | 
 | 1472 | 	atomic_inc(&netstamp_needed); | 
 | 1473 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1474 | EXPORT_SYMBOL(net_enable_timestamp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1475 |  | 
 | 1476 | void net_disable_timestamp(void) | 
 | 1477 | { | 
 | 1478 | 	atomic_dec(&netstamp_needed); | 
 | 1479 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1480 | EXPORT_SYMBOL(net_disable_timestamp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 1482 | static inline void net_timestamp_set(struct sk_buff *skb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | { | 
 | 1484 | 	if (atomic_read(&netstamp_needed)) | 
| Patrick McHardy | a61bbcf | 2005-08-14 17:24:31 -0700 | [diff] [blame] | 1485 | 		__net_timestamp(skb); | 
| Eric Dumazet | b7aa0bf | 2007-04-19 16:16:32 -0700 | [diff] [blame] | 1486 | 	else | 
 | 1487 | 		skb->tstamp.tv64 = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | } | 
 | 1489 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 1490 | static inline void net_timestamp_check(struct sk_buff *skb) | 
 | 1491 | { | 
 | 1492 | 	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed)) | 
 | 1493 | 		__net_timestamp(skb); | 
 | 1494 | } | 
 | 1495 |  | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1496 | /** | 
 | 1497 |  * dev_forward_skb - loopback an skb to another netif | 
 | 1498 |  * | 
 | 1499 |  * @dev: destination network device | 
 | 1500 |  * @skb: buffer to forward | 
 | 1501 |  * | 
 | 1502 |  * return values: | 
 | 1503 |  *	NET_RX_SUCCESS	(no congestion) | 
| Eric Dumazet | 6ec8256 | 2010-05-06 00:53:53 -0700 | [diff] [blame] | 1504 |  *	NET_RX_DROP     (packet was dropped, but freed) | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1505 |  * | 
 | 1506 |  * dev_forward_skb can be used for injecting an skb from the | 
 | 1507 |  * start_xmit function of one device into the receive queue | 
 | 1508 |  * of another device. | 
 | 1509 |  * | 
 | 1510 |  * The receiving device may be in another namespace, so | 
 | 1511 |  * we have to clear all information in the skb that could | 
 | 1512 |  * impact namespace isolation. | 
 | 1513 |  */ | 
 | 1514 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | 
 | 1515 | { | 
 | 1516 | 	skb_orphan(skb); | 
| Ben Greear | c736eef | 2010-07-22 09:54:47 +0000 | [diff] [blame] | 1517 | 	nf_reset(skb); | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1518 |  | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 1519 | 	if (unlikely(!(dev->flags & IFF_UP) || | 
| David S. Miller | 2198a10 | 2010-10-21 08:43:05 -0700 | [diff] [blame] | 1520 | 		     (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) { | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 1521 | 		atomic_long_inc(&dev->rx_dropped); | 
| Eric Dumazet | 6ec8256 | 2010-05-06 00:53:53 -0700 | [diff] [blame] | 1522 | 		kfree_skb(skb); | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1523 | 		return NET_RX_DROP; | 
| Eric Dumazet | 6ec8256 | 2010-05-06 00:53:53 -0700 | [diff] [blame] | 1524 | 	} | 
| Arnd Bergmann | 8a83a00 | 2010-01-30 12:23:03 +0000 | [diff] [blame] | 1525 | 	skb_set_dev(skb, dev); | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1526 | 	skb->tstamp.tv64 = 0; | 
 | 1527 | 	skb->pkt_type = PACKET_HOST; | 
 | 1528 | 	skb->protocol = eth_type_trans(skb, dev); | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1529 | 	return netif_rx(skb); | 
 | 1530 | } | 
 | 1531 | EXPORT_SYMBOL_GPL(dev_forward_skb); | 
 | 1532 |  | 
| Changli Gao | 71d9dec | 2010-12-15 19:57:25 +0000 | [diff] [blame] | 1533 | static inline int deliver_skb(struct sk_buff *skb, | 
 | 1534 | 			      struct packet_type *pt_prev, | 
 | 1535 | 			      struct net_device *orig_dev) | 
 | 1536 | { | 
 | 1537 | 	atomic_inc(&skb->users); | 
 | 1538 | 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 
 | 1539 | } | 
 | 1540 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1541 | /* | 
 | 1542 |  *	Support routine. Sends outgoing frames to any network | 
 | 1543 |  *	taps currently in use. | 
 | 1544 |  */ | 
 | 1545 |  | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1546 | static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | { | 
 | 1548 | 	struct packet_type *ptype; | 
| Changli Gao | 71d9dec | 2010-12-15 19:57:25 +0000 | [diff] [blame] | 1549 | 	struct sk_buff *skb2 = NULL; | 
 | 1550 | 	struct packet_type *pt_prev = NULL; | 
| Patrick McHardy | a61bbcf | 2005-08-14 17:24:31 -0700 | [diff] [blame] | 1551 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | 	rcu_read_lock(); | 
 | 1553 | 	list_for_each_entry_rcu(ptype, &ptype_all, list) { | 
 | 1554 | 		/* Never send packets back to the socket | 
 | 1555 | 		 * they originated from - MvS (miquels@drinkel.ow.org) | 
 | 1556 | 		 */ | 
 | 1557 | 		if ((ptype->dev == dev || !ptype->dev) && | 
 | 1558 | 		    (ptype->af_packet_priv == NULL || | 
 | 1559 | 		     (struct sock *)ptype->af_packet_priv != skb->sk)) { | 
| Changli Gao | 71d9dec | 2010-12-15 19:57:25 +0000 | [diff] [blame] | 1560 | 			if (pt_prev) { | 
 | 1561 | 				deliver_skb(skb2, pt_prev, skb->dev); | 
 | 1562 | 				pt_prev = ptype; | 
 | 1563 | 				continue; | 
 | 1564 | 			} | 
 | 1565 |  | 
 | 1566 | 			skb2 = skb_clone(skb, GFP_ATOMIC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1567 | 			if (!skb2) | 
 | 1568 | 				break; | 
 | 1569 |  | 
| Eric Dumazet | 7097818 | 2010-12-20 21:22:51 +0000 | [diff] [blame] | 1570 | 			net_timestamp_set(skb2); | 
 | 1571 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | 			/* skb->nh should be correctly | 
 | 1573 | 			   set by sender, so that the second statement is | 
 | 1574 | 			   just protection against buggy protocols. | 
 | 1575 | 			 */ | 
| Arnaldo Carvalho de Melo | 459a98e | 2007-03-19 15:30:44 -0700 | [diff] [blame] | 1576 | 			skb_reset_mac_header(skb2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 |  | 
| Arnaldo Carvalho de Melo | d56f90a | 2007-04-10 20:50:43 -0700 | [diff] [blame] | 1578 | 			if (skb_network_header(skb2) < skb2->data || | 
| Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1579 | 			    skb2->network_header > skb2->tail) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | 				if (net_ratelimit()) | 
 | 1581 | 					printk(KERN_CRIT "protocol %04x is " | 
 | 1582 | 					       "buggy, dev %s\n", | 
| Sebastian Andrzej Siewior | 70777d0 | 2010-06-30 10:39:19 -0700 | [diff] [blame] | 1583 | 					       ntohs(skb2->protocol), | 
 | 1584 | 					       dev->name); | 
| Arnaldo Carvalho de Melo | c1d2bbe | 2007-04-10 20:45:18 -0700 | [diff] [blame] | 1585 | 				skb_reset_network_header(skb2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1586 | 			} | 
 | 1587 |  | 
| Arnaldo Carvalho de Melo | b0e380b | 2007-04-10 21:21:55 -0700 | [diff] [blame] | 1588 | 			skb2->transport_header = skb2->network_header; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | 			skb2->pkt_type = PACKET_OUTGOING; | 
| Changli Gao | 71d9dec | 2010-12-15 19:57:25 +0000 | [diff] [blame] | 1590 | 			pt_prev = ptype; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | 		} | 
 | 1592 | 	} | 
| Changli Gao | 71d9dec | 2010-12-15 19:57:25 +0000 | [diff] [blame] | 1593 | 	if (pt_prev) | 
 | 1594 | 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | 	rcu_read_unlock(); | 
 | 1596 | } | 
 | 1597 |  | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1598 | /* netif_setup_tc - Handle tc mappings on real_num_tx_queues change | 
 | 1599 |  * @dev: Network device | 
 | 1600 |  * @txq: number of queues available | 
 | 1601 |  * | 
 | 1602 |  * If real_num_tx_queues is changed the tc mappings may no longer be | 
 | 1603 |  * valid. To resolve this verify the tc mapping remains valid and if | 
 | 1604 |  * not NULL the mapping. With no priorities mapping to this | 
 | 1605 |  * offset/count pair it will no longer be used. In the worst case TC0 | 
 | 1606 |  * is invalid nothing can be done so disable priority mappings. If is | 
 | 1607 |  * expected that drivers will fix this mapping if they can before | 
 | 1608 |  * calling netif_set_real_num_tx_queues. | 
 | 1609 |  */ | 
| Eric Dumazet | bb134d2 | 2011-01-20 19:18:08 +0000 | [diff] [blame] | 1610 | static void netif_setup_tc(struct net_device *dev, unsigned int txq) | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1611 | { | 
 | 1612 | 	int i; | 
 | 1613 | 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; | 
 | 1614 |  | 
 | 1615 | 	/* If TC0 is invalidated disable TC mapping */ | 
 | 1616 | 	if (tc->offset + tc->count > txq) { | 
 | 1617 | 		pr_warning("Number of in use tx queues changed " | 
 | 1618 | 			   "invalidating tc mappings. Priority " | 
 | 1619 | 			   "traffic classification disabled!\n"); | 
 | 1620 | 		dev->num_tc = 0; | 
 | 1621 | 		return; | 
 | 1622 | 	} | 
 | 1623 |  | 
 | 1624 | 	/* Invalidated prio to tc mappings set to TC0 */ | 
 | 1625 | 	for (i = 1; i < TC_BITMASK + 1; i++) { | 
 | 1626 | 		int q = netdev_get_prio_tc_map(dev, i); | 
 | 1627 |  | 
 | 1628 | 		tc = &dev->tc_to_txq[q]; | 
 | 1629 | 		if (tc->offset + tc->count > txq) { | 
 | 1630 | 			pr_warning("Number of in use tx queues " | 
 | 1631 | 				   "changed. Priority %i to tc " | 
 | 1632 | 				   "mapping %i is no longer valid " | 
 | 1633 | 				   "setting map to 0\n", | 
 | 1634 | 				   i, q); | 
 | 1635 | 			netdev_set_prio_tc_map(dev, i, 0); | 
 | 1636 | 		} | 
 | 1637 | 	} | 
 | 1638 | } | 
 | 1639 |  | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 1640 | /* | 
 | 1641 |  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues | 
 | 1642 |  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. | 
 | 1643 |  */ | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 1644 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 1645 | { | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1646 | 	int rc; | 
 | 1647 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 1648 | 	if (txq < 1 || txq > dev->num_tx_queues) | 
 | 1649 | 		return -EINVAL; | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 1650 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 1651 | 	if (dev->reg_state == NETREG_REGISTERED) { | 
 | 1652 | 		ASSERT_RTNL(); | 
 | 1653 |  | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1654 | 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, | 
 | 1655 | 						  txq); | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1656 | 		if (rc) | 
 | 1657 | 			return rc; | 
 | 1658 |  | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1659 | 		if (dev->num_tc) | 
 | 1660 | 			netif_setup_tc(dev, txq); | 
 | 1661 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 1662 | 		if (txq < dev->real_num_tx_queues) | 
 | 1663 | 			qdisc_reset_all_tx_gt(dev, txq); | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 1664 | 	} | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 1665 |  | 
 | 1666 | 	dev->real_num_tx_queues = txq; | 
 | 1667 | 	return 0; | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 1668 | } | 
 | 1669 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1670 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1671 | #ifdef CONFIG_RPS | 
 | 1672 | /** | 
 | 1673 |  *	netif_set_real_num_rx_queues - set actual number of RX queues used | 
 | 1674 |  *	@dev: Network device | 
 | 1675 |  *	@rxq: Actual number of RX queues | 
 | 1676 |  * | 
 | 1677 |  *	This must be called either with the rtnl_lock held or before | 
 | 1678 |  *	registration of the net device.  Returns 0 on success, or a | 
| Ben Hutchings | 4e7f795 | 2010-10-08 10:33:39 -0700 | [diff] [blame] | 1679 |  *	negative error code.  If called before registration, it always | 
 | 1680 |  *	succeeds. | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1681 |  */ | 
 | 1682 | int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) | 
 | 1683 | { | 
 | 1684 | 	int rc; | 
 | 1685 |  | 
| Tom Herbert | bd25fa7 | 2010-10-18 18:00:16 +0000 | [diff] [blame] | 1686 | 	if (rxq < 1 || rxq > dev->num_rx_queues) | 
 | 1687 | 		return -EINVAL; | 
 | 1688 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1689 | 	if (dev->reg_state == NETREG_REGISTERED) { | 
 | 1690 | 		ASSERT_RTNL(); | 
 | 1691 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1692 | 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, | 
 | 1693 | 						  rxq); | 
 | 1694 | 		if (rc) | 
 | 1695 | 			return rc; | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1696 | 	} | 
 | 1697 |  | 
 | 1698 | 	dev->real_num_rx_queues = rxq; | 
 | 1699 | 	return 0; | 
 | 1700 | } | 
 | 1701 | EXPORT_SYMBOL(netif_set_real_num_rx_queues); | 
 | 1702 | #endif | 
 | 1703 |  | 
| Jarek Poplawski | def82a1 | 2008-08-17 21:54:43 -0700 | [diff] [blame] | 1704 | static inline void __netif_reschedule(struct Qdisc *q) | 
 | 1705 | { | 
 | 1706 | 	struct softnet_data *sd; | 
 | 1707 | 	unsigned long flags; | 
 | 1708 |  | 
 | 1709 | 	local_irq_save(flags); | 
 | 1710 | 	sd = &__get_cpu_var(softnet_data); | 
| Changli Gao | a9cbd58 | 2010-04-26 23:06:24 +0000 | [diff] [blame] | 1711 | 	q->next_sched = NULL; | 
 | 1712 | 	*sd->output_queue_tailp = q; | 
 | 1713 | 	sd->output_queue_tailp = &q->next_sched; | 
| Jarek Poplawski | def82a1 | 2008-08-17 21:54:43 -0700 | [diff] [blame] | 1714 | 	raise_softirq_irqoff(NET_TX_SOFTIRQ); | 
 | 1715 | 	local_irq_restore(flags); | 
 | 1716 | } | 
 | 1717 |  | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1718 | void __netif_schedule(struct Qdisc *q) | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1719 | { | 
| Jarek Poplawski | def82a1 | 2008-08-17 21:54:43 -0700 | [diff] [blame] | 1720 | 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) | 
 | 1721 | 		__netif_reschedule(q); | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1722 | } | 
 | 1723 | EXPORT_SYMBOL(__netif_schedule); | 
 | 1724 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1725 | void dev_kfree_skb_irq(struct sk_buff *skb) | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1726 | { | 
| David S. Miller | 3578b0c | 2010-08-03 00:24:04 -0700 | [diff] [blame] | 1727 | 	if (atomic_dec_and_test(&skb->users)) { | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1728 | 		struct softnet_data *sd; | 
 | 1729 | 		unsigned long flags; | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1730 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1731 | 		local_irq_save(flags); | 
 | 1732 | 		sd = &__get_cpu_var(softnet_data); | 
 | 1733 | 		skb->next = sd->completion_queue; | 
 | 1734 | 		sd->completion_queue = skb; | 
 | 1735 | 		raise_softirq_irqoff(NET_TX_SOFTIRQ); | 
 | 1736 | 		local_irq_restore(flags); | 
 | 1737 | 	} | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1738 | } | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1739 | EXPORT_SYMBOL(dev_kfree_skb_irq); | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1740 |  | 
 | 1741 | void dev_kfree_skb_any(struct sk_buff *skb) | 
 | 1742 | { | 
 | 1743 | 	if (in_irq() || irqs_disabled()) | 
 | 1744 | 		dev_kfree_skb_irq(skb); | 
 | 1745 | 	else | 
 | 1746 | 		dev_kfree_skb(skb); | 
 | 1747 | } | 
 | 1748 | EXPORT_SYMBOL(dev_kfree_skb_any); | 
 | 1749 |  | 
 | 1750 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1751 | /** | 
 | 1752 |  * netif_device_detach - mark device as removed | 
 | 1753 |  * @dev: network device | 
 | 1754 |  * | 
 | 1755 |  * Mark device as removed from system and therefore no longer available. | 
 | 1756 |  */ | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1757 | void netif_device_detach(struct net_device *dev) | 
 | 1758 | { | 
 | 1759 | 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && | 
 | 1760 | 	    netif_running(dev)) { | 
| Alexander Duyck | d543103 | 2009-04-08 13:15:22 +0000 | [diff] [blame] | 1761 | 		netif_tx_stop_all_queues(dev); | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1762 | 	} | 
 | 1763 | } | 
 | 1764 | EXPORT_SYMBOL(netif_device_detach); | 
 | 1765 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1766 | /** | 
 | 1767 |  * netif_device_attach - mark device as attached | 
 | 1768 |  * @dev: network device | 
 | 1769 |  * | 
 | 1770 |  * Mark device as attached from system and restart if needed. | 
 | 1771 |  */ | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1772 | void netif_device_attach(struct net_device *dev) | 
 | 1773 | { | 
 | 1774 | 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && | 
 | 1775 | 	    netif_running(dev)) { | 
| Alexander Duyck | d543103 | 2009-04-08 13:15:22 +0000 | [diff] [blame] | 1776 | 		netif_tx_wake_all_queues(dev); | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1777 | 		__netdev_watchdog_up(dev); | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1778 | 	} | 
 | 1779 | } | 
 | 1780 | EXPORT_SYMBOL(netif_device_attach); | 
 | 1781 |  | 
| Arnd Bergmann | 8a83a00 | 2010-01-30 12:23:03 +0000 | [diff] [blame] | 1782 | /** | 
 | 1783 |  * skb_dev_set -- assign a new device to a buffer | 
 | 1784 |  * @skb: buffer for the new device | 
 | 1785 |  * @dev: network device | 
 | 1786 |  * | 
 | 1787 |  * If an skb is owned by a device already, we have to reset | 
 | 1788 |  * all data private to the namespace a device belongs to | 
 | 1789 |  * before assigning it a new device. | 
 | 1790 |  */ | 
 | 1791 | #ifdef CONFIG_NET_NS | 
 | 1792 | void skb_set_dev(struct sk_buff *skb, struct net_device *dev) | 
 | 1793 | { | 
 | 1794 | 	skb_dst_drop(skb); | 
 | 1795 | 	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { | 
 | 1796 | 		secpath_reset(skb); | 
 | 1797 | 		nf_reset(skb); | 
 | 1798 | 		skb_init_secmark(skb); | 
 | 1799 | 		skb->mark = 0; | 
 | 1800 | 		skb->priority = 0; | 
 | 1801 | 		skb->nf_trace = 0; | 
 | 1802 | 		skb->ipvs_property = 0; | 
 | 1803 | #ifdef CONFIG_NET_SCHED | 
 | 1804 | 		skb->tc_index = 0; | 
 | 1805 | #endif | 
 | 1806 | 	} | 
 | 1807 | 	skb->dev = dev; | 
 | 1808 | } | 
 | 1809 | EXPORT_SYMBOL(skb_set_dev); | 
 | 1810 | #endif /* CONFIG_NET_NS */ | 
 | 1811 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | /* | 
 | 1813 |  * Invalidate hardware checksum when packet is to be mangled, and | 
 | 1814 |  * complete checksum manually on outgoing path. | 
 | 1815 |  */ | 
| Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 1816 | int skb_checksum_help(struct sk_buff *skb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 | { | 
| Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 1818 | 	__wsum csum; | 
| Herbert Xu | 663ead3 | 2007-04-09 11:59:07 -0700 | [diff] [blame] | 1819 | 	int ret = 0, offset; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1820 |  | 
| Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 1821 | 	if (skb->ip_summed == CHECKSUM_COMPLETE) | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1822 | 		goto out_set_summed; | 
 | 1823 |  | 
 | 1824 | 	if (unlikely(skb_shinfo(skb)->gso_size)) { | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1825 | 		/* Let GSO fix up the checksum. */ | 
 | 1826 | 		goto out_set_summed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | 	} | 
 | 1828 |  | 
| Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 1829 | 	offset = skb_checksum_start_offset(skb); | 
| Herbert Xu | a030847 | 2007-10-15 01:47:15 -0700 | [diff] [blame] | 1830 | 	BUG_ON(offset >= skb_headlen(skb)); | 
 | 1831 | 	csum = skb_checksum(skb, offset, skb->len - offset, 0); | 
 | 1832 |  | 
 | 1833 | 	offset += skb->csum_offset; | 
 | 1834 | 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); | 
 | 1835 |  | 
 | 1836 | 	if (skb_cloned(skb) && | 
 | 1837 | 	    !skb_clone_writable(skb, offset + sizeof(__sum16))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1838 | 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 
 | 1839 | 		if (ret) | 
 | 1840 | 			goto out; | 
 | 1841 | 	} | 
 | 1842 |  | 
| Herbert Xu | a030847 | 2007-10-15 01:47:15 -0700 | [diff] [blame] | 1843 | 	*(__sum16 *)(skb->data + offset) = csum_fold(csum); | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1844 | out_set_summed: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | 	skb->ip_summed = CHECKSUM_NONE; | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1846 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | 	return ret; | 
 | 1848 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 1849 | EXPORT_SYMBOL(skb_checksum_help); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1850 |  | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1851 | /** | 
 | 1852 |  *	skb_gso_segment - Perform segmentation on skb. | 
 | 1853 |  *	@skb: buffer to segment | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 1854 |  *	@features: features for the output path (see dev->features) | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1855 |  * | 
 | 1856 |  *	This function segments the given skb and returns a list of segments. | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 1857 |  * | 
 | 1858 |  *	It may return NULL if the skb requires no segmentation.  This is | 
 | 1859 |  *	only possible when GSO is used for verifying header integrity. | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1860 |  */ | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 1861 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features) | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1862 | { | 
 | 1863 | 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 
 | 1864 | 	struct packet_type *ptype; | 
| Al Viro | 252e334 | 2006-11-14 20:48:11 -0800 | [diff] [blame] | 1865 | 	__be16 type = skb->protocol; | 
| Jesse Gross | c8d5bcd | 2010-10-29 12:14:54 +0000 | [diff] [blame] | 1866 | 	int vlan_depth = ETH_HLEN; | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1867 | 	int err; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1868 |  | 
| Jesse Gross | c8d5bcd | 2010-10-29 12:14:54 +0000 | [diff] [blame] | 1869 | 	while (type == htons(ETH_P_8021Q)) { | 
 | 1870 | 		struct vlan_hdr *vh; | 
| Jesse Gross | 7b9c609 | 2010-10-20 13:56:04 +0000 | [diff] [blame] | 1871 |  | 
| Jesse Gross | c8d5bcd | 2010-10-29 12:14:54 +0000 | [diff] [blame] | 1872 | 		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) | 
| Jesse Gross | 7b9c609 | 2010-10-20 13:56:04 +0000 | [diff] [blame] | 1873 | 			return ERR_PTR(-EINVAL); | 
 | 1874 |  | 
| Jesse Gross | c8d5bcd | 2010-10-29 12:14:54 +0000 | [diff] [blame] | 1875 | 		vh = (struct vlan_hdr *)(skb->data + vlan_depth); | 
 | 1876 | 		type = vh->h_vlan_encapsulated_proto; | 
 | 1877 | 		vlan_depth += VLAN_HLEN; | 
| Jesse Gross | 7b9c609 | 2010-10-20 13:56:04 +0000 | [diff] [blame] | 1878 | 	} | 
 | 1879 |  | 
| Arnaldo Carvalho de Melo | 459a98e | 2007-03-19 15:30:44 -0700 | [diff] [blame] | 1880 | 	skb_reset_mac_header(skb); | 
| Arnaldo Carvalho de Melo | b0e380b | 2007-04-10 21:21:55 -0700 | [diff] [blame] | 1881 | 	skb->mac_len = skb->network_header - skb->mac_header; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1882 | 	__skb_pull(skb, skb->mac_len); | 
 | 1883 |  | 
| Herbert Xu | 67fd1a7 | 2009-01-19 16:26:44 -0800 | [diff] [blame] | 1884 | 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { | 
 | 1885 | 		struct net_device *dev = skb->dev; | 
 | 1886 | 		struct ethtool_drvinfo info = {}; | 
 | 1887 |  | 
 | 1888 | 		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) | 
 | 1889 | 			dev->ethtool_ops->get_drvinfo(dev, &info); | 
 | 1890 |  | 
| Joe Perches | b194a36 | 2010-10-30 11:08:52 +0000 | [diff] [blame] | 1891 | 		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n", | 
| Herbert Xu | 67fd1a7 | 2009-01-19 16:26:44 -0800 | [diff] [blame] | 1892 | 		     info.driver, dev ? dev->features : 0L, | 
 | 1893 | 		     skb->sk ? skb->sk->sk_route_caps : 0L, | 
 | 1894 | 		     skb->len, skb->data_len, skb->ip_summed); | 
 | 1895 |  | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1896 | 		if (skb_header_cloned(skb) && | 
 | 1897 | 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) | 
 | 1898 | 			return ERR_PTR(err); | 
 | 1899 | 	} | 
 | 1900 |  | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1901 | 	rcu_read_lock(); | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 1902 | 	list_for_each_entry_rcu(ptype, | 
 | 1903 | 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1904 | 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) { | 
| Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 1905 | 			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1906 | 				err = ptype->gso_send_check(skb); | 
 | 1907 | 				segs = ERR_PTR(err); | 
 | 1908 | 				if (err || skb_gso_ok(skb, features)) | 
 | 1909 | 					break; | 
| Arnaldo Carvalho de Melo | d56f90a | 2007-04-10 20:50:43 -0700 | [diff] [blame] | 1910 | 				__skb_push(skb, (skb->data - | 
 | 1911 | 						 skb_network_header(skb))); | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1912 | 			} | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 1913 | 			segs = ptype->gso_segment(skb, features); | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1914 | 			break; | 
 | 1915 | 		} | 
 | 1916 | 	} | 
 | 1917 | 	rcu_read_unlock(); | 
 | 1918 |  | 
| Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 1919 | 	__skb_push(skb, skb->data - skb_mac_header(skb)); | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 1920 |  | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1921 | 	return segs; | 
 | 1922 | } | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1923 | EXPORT_SYMBOL(skb_gso_segment); | 
 | 1924 |  | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 1925 | /* Take action when hardware reception checksum errors are detected. */ | 
 | 1926 | #ifdef CONFIG_BUG | 
 | 1927 | void netdev_rx_csum_fault(struct net_device *dev) | 
 | 1928 | { | 
 | 1929 | 	if (net_ratelimit()) { | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1930 | 		printk(KERN_ERR "%s: hw csum failure.\n", | 
| Stephen Hemminger | 246a421 | 2005-12-08 15:21:39 -0800 | [diff] [blame] | 1931 | 			dev ? dev->name : "<unknown>"); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 1932 | 		dump_stack(); | 
 | 1933 | 	} | 
 | 1934 | } | 
 | 1935 | EXPORT_SYMBOL(netdev_rx_csum_fault); | 
 | 1936 | #endif | 
 | 1937 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1938 | /* Actually, we should eliminate this check as soon as we know, that: | 
 | 1939 |  * 1. IOMMU is present and allows to map all the memory. | 
 | 1940 |  * 2. No high memory really exists on this machine. | 
 | 1941 |  */ | 
 | 1942 |  | 
| Eric Dumazet | 9092c65 | 2010-04-02 13:34:49 -0700 | [diff] [blame] | 1943 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1944 | { | 
| Herbert Xu | 3d3a853 | 2006-06-27 13:33:10 -0700 | [diff] [blame] | 1945 | #ifdef CONFIG_HIGHMEM | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1946 | 	int i; | 
| FUJITA Tomonori | 5acbbd4 | 2010-03-30 22:35:50 +0000 | [diff] [blame] | 1947 | 	if (!(dev->features & NETIF_F_HIGHDMA)) { | 
 | 1948 | 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 
 | 1949 | 			if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | 
 | 1950 | 				return 1; | 
 | 1951 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1952 |  | 
| FUJITA Tomonori | 5acbbd4 | 2010-03-30 22:35:50 +0000 | [diff] [blame] | 1953 | 	if (PCI_DMA_BUS_IS_PHYS) { | 
 | 1954 | 		struct device *pdev = dev->dev.parent; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 |  | 
| Eric Dumazet | 9092c65 | 2010-04-02 13:34:49 -0700 | [diff] [blame] | 1956 | 		if (!pdev) | 
 | 1957 | 			return 0; | 
| FUJITA Tomonori | 5acbbd4 | 2010-03-30 22:35:50 +0000 | [diff] [blame] | 1958 | 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
 | 1959 | 			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); | 
 | 1960 | 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) | 
 | 1961 | 				return 1; | 
 | 1962 | 		} | 
 | 1963 | 	} | 
| Herbert Xu | 3d3a853 | 2006-06-27 13:33:10 -0700 | [diff] [blame] | 1964 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1965 | 	return 0; | 
 | 1966 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1967 |  | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1968 | struct dev_gso_cb { | 
 | 1969 | 	void (*destructor)(struct sk_buff *skb); | 
 | 1970 | }; | 
 | 1971 |  | 
 | 1972 | #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) | 
 | 1973 |  | 
 | 1974 | static void dev_gso_skb_destructor(struct sk_buff *skb) | 
 | 1975 | { | 
 | 1976 | 	struct dev_gso_cb *cb; | 
 | 1977 |  | 
 | 1978 | 	do { | 
 | 1979 | 		struct sk_buff *nskb = skb->next; | 
 | 1980 |  | 
 | 1981 | 		skb->next = nskb->next; | 
 | 1982 | 		nskb->next = NULL; | 
 | 1983 | 		kfree_skb(nskb); | 
 | 1984 | 	} while (skb->next); | 
 | 1985 |  | 
 | 1986 | 	cb = DEV_GSO_CB(skb); | 
 | 1987 | 	if (cb->destructor) | 
 | 1988 | 		cb->destructor(skb); | 
 | 1989 | } | 
 | 1990 |  | 
 | 1991 | /** | 
 | 1992 |  *	dev_gso_segment - Perform emulated hardware segmentation on skb. | 
 | 1993 |  *	@skb: buffer to segment | 
| Jesse Gross | 91ecb63 | 2011-01-09 06:23:33 +0000 | [diff] [blame] | 1994 |  *	@features: device features as applicable to this skb | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1995 |  * | 
 | 1996 |  *	This function segments the given skb and stores the list of segments | 
 | 1997 |  *	in skb->next. | 
 | 1998 |  */ | 
| Jesse Gross | 91ecb63 | 2011-01-09 06:23:33 +0000 | [diff] [blame] | 1999 | static int dev_gso_segment(struct sk_buff *skb, int features) | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2000 | { | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2001 | 	struct sk_buff *segs; | 
 | 2002 |  | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2003 | 	segs = skb_gso_segment(skb, features); | 
 | 2004 |  | 
 | 2005 | 	/* Verifying header integrity only. */ | 
 | 2006 | 	if (!segs) | 
 | 2007 | 		return 0; | 
 | 2008 |  | 
| Hirofumi Nakagawa | 801678c | 2008-04-29 01:03:09 -0700 | [diff] [blame] | 2009 | 	if (IS_ERR(segs)) | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2010 | 		return PTR_ERR(segs); | 
 | 2011 |  | 
 | 2012 | 	skb->next = segs; | 
 | 2013 | 	DEV_GSO_CB(skb)->destructor = skb->destructor; | 
 | 2014 | 	skb->destructor = dev_gso_skb_destructor; | 
 | 2015 |  | 
 | 2016 | 	return 0; | 
 | 2017 | } | 
 | 2018 |  | 
| Eric Dumazet | fc6055a | 2010-04-16 12:18:22 +0000 | [diff] [blame] | 2019 | /* | 
 | 2020 |  * Try to orphan skb early, right before transmission by the device. | 
| Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2021 |  * We cannot orphan skb if tx timestamp is requested or the sk-reference | 
 | 2022 |  * is needed on driver level for other reasons, e.g. see net/can/raw.c | 
| Eric Dumazet | fc6055a | 2010-04-16 12:18:22 +0000 | [diff] [blame] | 2023 |  */ | 
 | 2024 | static inline void skb_orphan_try(struct sk_buff *skb) | 
 | 2025 | { | 
| Eric Dumazet | 87fd308 | 2010-07-13 05:24:20 +0000 | [diff] [blame] | 2026 | 	struct sock *sk = skb->sk; | 
 | 2027 |  | 
| Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 2028 | 	if (sk && !skb_shinfo(skb)->tx_flags) { | 
| Eric Dumazet | 87fd308 | 2010-07-13 05:24:20 +0000 | [diff] [blame] | 2029 | 		/* skb_tx_hash() wont be able to get sk. | 
 | 2030 | 		 * We copy sk_hash into skb->rxhash | 
 | 2031 | 		 */ | 
 | 2032 | 		if (!skb->rxhash) | 
 | 2033 | 			skb->rxhash = sk->sk_hash; | 
| Eric Dumazet | fc6055a | 2010-04-16 12:18:22 +0000 | [diff] [blame] | 2034 | 		skb_orphan(skb); | 
| Eric Dumazet | 87fd308 | 2010-07-13 05:24:20 +0000 | [diff] [blame] | 2035 | 	} | 
| Eric Dumazet | fc6055a | 2010-04-16 12:18:22 +0000 | [diff] [blame] | 2036 | } | 
 | 2037 |  | 
| Jesse Gross | 0363466 | 2011-01-09 06:23:35 +0000 | [diff] [blame] | 2038 | static bool can_checksum_protocol(unsigned long features, __be16 protocol) | 
 | 2039 | { | 
 | 2040 | 	return ((features & NETIF_F_GEN_CSUM) || | 
 | 2041 | 		((features & NETIF_F_V4_CSUM) && | 
 | 2042 | 		 protocol == htons(ETH_P_IP)) || | 
 | 2043 | 		((features & NETIF_F_V6_CSUM) && | 
 | 2044 | 		 protocol == htons(ETH_P_IPV6)) || | 
 | 2045 | 		((features & NETIF_F_FCOE_CRC) && | 
 | 2046 | 		 protocol == htons(ETH_P_FCOE))); | 
 | 2047 | } | 
 | 2048 |  | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2049 | static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features) | 
| Jesse Gross | f01a523 | 2011-01-09 06:23:31 +0000 | [diff] [blame] | 2050 | { | 
| Eric Dumazet | d402786 | 2011-01-19 00:51:36 +0000 | [diff] [blame] | 2051 | 	if (!can_checksum_protocol(features, protocol)) { | 
| Jesse Gross | f01a523 | 2011-01-09 06:23:31 +0000 | [diff] [blame] | 2052 | 		features &= ~NETIF_F_ALL_CSUM; | 
 | 2053 | 		features &= ~NETIF_F_SG; | 
 | 2054 | 	} else if (illegal_highdma(skb->dev, skb)) { | 
 | 2055 | 		features &= ~NETIF_F_SG; | 
 | 2056 | 	} | 
 | 2057 |  | 
 | 2058 | 	return features; | 
 | 2059 | } | 
 | 2060 |  | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2061 | u32 netif_skb_features(struct sk_buff *skb) | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2062 | { | 
 | 2063 | 	__be16 protocol = skb->protocol; | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2064 | 	u32 features = skb->dev->features; | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2065 |  | 
 | 2066 | 	if (protocol == htons(ETH_P_8021Q)) { | 
 | 2067 | 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | 
 | 2068 | 		protocol = veh->h_vlan_encapsulated_proto; | 
| Jesse Gross | f01a523 | 2011-01-09 06:23:31 +0000 | [diff] [blame] | 2069 | 	} else if (!vlan_tx_tag_present(skb)) { | 
 | 2070 | 		return harmonize_features(skb, protocol, features); | 
 | 2071 | 	} | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2072 |  | 
| Jesse Gross | 6ee400a | 2011-01-17 20:46:00 +0000 | [diff] [blame] | 2073 | 	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX); | 
| Jesse Gross | f01a523 | 2011-01-09 06:23:31 +0000 | [diff] [blame] | 2074 |  | 
 | 2075 | 	if (protocol != htons(ETH_P_8021Q)) { | 
 | 2076 | 		return harmonize_features(skb, protocol, features); | 
 | 2077 | 	} else { | 
 | 2078 | 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | | 
| Jesse Gross | 6ee400a | 2011-01-17 20:46:00 +0000 | [diff] [blame] | 2079 | 				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX; | 
| Jesse Gross | f01a523 | 2011-01-09 06:23:31 +0000 | [diff] [blame] | 2080 | 		return harmonize_features(skb, protocol, features); | 
 | 2081 | 	} | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2082 | } | 
| Jesse Gross | f01a523 | 2011-01-09 06:23:31 +0000 | [diff] [blame] | 2083 | EXPORT_SYMBOL(netif_skb_features); | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2084 |  | 
| John Fastabend | 6afff0c | 2010-06-16 14:18:12 +0000 | [diff] [blame] | 2085 | /* | 
 | 2086 |  * Returns true if either: | 
 | 2087 |  *	1. skb has frag_list and the device doesn't support FRAGLIST, or | 
 | 2088 |  *	2. skb is fragmented and the device does not support SG, or if | 
 | 2089 |  *	   at least one of fragments is in highmem and device does not | 
 | 2090 |  *	   support DMA from it. | 
 | 2091 |  */ | 
 | 2092 | static inline int skb_needs_linearize(struct sk_buff *skb, | 
| Jesse Gross | 02932ce | 2011-01-09 06:23:34 +0000 | [diff] [blame] | 2093 | 				      int features) | 
| John Fastabend | 6afff0c | 2010-06-16 14:18:12 +0000 | [diff] [blame] | 2094 | { | 
| Jesse Gross | 02932ce | 2011-01-09 06:23:34 +0000 | [diff] [blame] | 2095 | 	return skb_is_nonlinear(skb) && | 
 | 2096 | 			((skb_has_frag_list(skb) && | 
 | 2097 | 				!(features & NETIF_F_FRAGLIST)) || | 
| Jesse Gross | e1e78db | 2010-10-29 12:14:53 +0000 | [diff] [blame] | 2098 | 			(skb_shinfo(skb)->nr_frags && | 
| Jesse Gross | 02932ce | 2011-01-09 06:23:34 +0000 | [diff] [blame] | 2099 | 				!(features & NETIF_F_SG))); | 
| John Fastabend | 6afff0c | 2010-06-16 14:18:12 +0000 | [diff] [blame] | 2100 | } | 
 | 2101 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2102 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 
 | 2103 | 			struct netdev_queue *txq) | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2104 | { | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 2105 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 2106 | 	int rc = NETDEV_TX_OK; | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 2107 |  | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2108 | 	if (likely(!skb->next)) { | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2109 | 		u32 features; | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2110 |  | 
| Eric Dumazet | 93f154b | 2009-05-18 22:19:19 -0700 | [diff] [blame] | 2111 | 		/* | 
 | 2112 | 		 * If device doesnt need skb->dst, release it right now while | 
 | 2113 | 		 * its hot in this cpu cache | 
 | 2114 | 		 */ | 
| Eric Dumazet | adf3090 | 2009-06-02 05:19:30 +0000 | [diff] [blame] | 2115 | 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE) | 
 | 2116 | 			skb_dst_drop(skb); | 
 | 2117 |  | 
| Eric Dumazet | 15c2d75f | 2010-12-07 00:30:37 +0000 | [diff] [blame] | 2118 | 		if (!list_empty(&ptype_all)) | 
 | 2119 | 			dev_queue_xmit_nit(skb, dev); | 
 | 2120 |  | 
| Eric Dumazet | fc6055a | 2010-04-16 12:18:22 +0000 | [diff] [blame] | 2121 | 		skb_orphan_try(skb); | 
| David S. Miller | 9ccb897 | 2010-04-22 01:02:07 -0700 | [diff] [blame] | 2122 |  | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2123 | 		features = netif_skb_features(skb); | 
 | 2124 |  | 
| Jesse Gross | 7b9c609 | 2010-10-20 13:56:04 +0000 | [diff] [blame] | 2125 | 		if (vlan_tx_tag_present(skb) && | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2126 | 		    !(features & NETIF_F_HW_VLAN_TX)) { | 
| Jesse Gross | 7b9c609 | 2010-10-20 13:56:04 +0000 | [diff] [blame] | 2127 | 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); | 
 | 2128 | 			if (unlikely(!skb)) | 
 | 2129 | 				goto out; | 
 | 2130 |  | 
 | 2131 | 			skb->vlan_tci = 0; | 
 | 2132 | 		} | 
 | 2133 |  | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2134 | 		if (netif_needs_gso(skb, features)) { | 
| Jesse Gross | 91ecb63 | 2011-01-09 06:23:33 +0000 | [diff] [blame] | 2135 | 			if (unlikely(dev_gso_segment(skb, features))) | 
| David S. Miller | 9ccb897 | 2010-04-22 01:02:07 -0700 | [diff] [blame] | 2136 | 				goto out_kfree_skb; | 
 | 2137 | 			if (skb->next) | 
 | 2138 | 				goto gso; | 
| John Fastabend | 6afff0c | 2010-06-16 14:18:12 +0000 | [diff] [blame] | 2139 | 		} else { | 
| Jesse Gross | 02932ce | 2011-01-09 06:23:34 +0000 | [diff] [blame] | 2140 | 			if (skb_needs_linearize(skb, features) && | 
| John Fastabend | 6afff0c | 2010-06-16 14:18:12 +0000 | [diff] [blame] | 2141 | 			    __skb_linearize(skb)) | 
 | 2142 | 				goto out_kfree_skb; | 
 | 2143 |  | 
 | 2144 | 			/* If packet is not checksummed and device does not | 
 | 2145 | 			 * support checksumming for this protocol, complete | 
 | 2146 | 			 * checksumming here. | 
 | 2147 | 			 */ | 
 | 2148 | 			if (skb->ip_summed == CHECKSUM_PARTIAL) { | 
| Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 2149 | 				skb_set_transport_header(skb, | 
 | 2150 | 					skb_checksum_start_offset(skb)); | 
| Jesse Gross | 0363466 | 2011-01-09 06:23:35 +0000 | [diff] [blame] | 2151 | 				if (!(features & NETIF_F_ALL_CSUM) && | 
| John Fastabend | 6afff0c | 2010-06-16 14:18:12 +0000 | [diff] [blame] | 2152 | 				     skb_checksum_help(skb)) | 
 | 2153 | 					goto out_kfree_skb; | 
 | 2154 | 			} | 
| David S. Miller | 9ccb897 | 2010-04-22 01:02:07 -0700 | [diff] [blame] | 2155 | 		} | 
 | 2156 |  | 
| Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 2157 | 		rc = ops->ndo_start_xmit(skb, dev); | 
| Koki Sanagi | cf66ba5 | 2010-08-23 18:45:02 +0900 | [diff] [blame] | 2158 | 		trace_net_dev_xmit(skb, rc); | 
| Patrick McHardy | ec634fe | 2009-07-05 19:23:38 -0700 | [diff] [blame] | 2159 | 		if (rc == NETDEV_TX_OK) | 
| Eric Dumazet | 08baf56 | 2009-05-25 22:58:01 -0700 | [diff] [blame] | 2160 | 			txq_trans_update(txq); | 
| Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 2161 | 		return rc; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2162 | 	} | 
 | 2163 |  | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2164 | gso: | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2165 | 	do { | 
 | 2166 | 		struct sk_buff *nskb = skb->next; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2167 |  | 
 | 2168 | 		skb->next = nskb->next; | 
 | 2169 | 		nskb->next = NULL; | 
| Krishna Kumar | 068a2de | 2009-12-09 20:59:58 +0000 | [diff] [blame] | 2170 |  | 
 | 2171 | 		/* | 
 | 2172 | 		 * If device doesnt need nskb->dst, release it right now while | 
 | 2173 | 		 * its hot in this cpu cache | 
 | 2174 | 		 */ | 
 | 2175 | 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE) | 
 | 2176 | 			skb_dst_drop(nskb); | 
 | 2177 |  | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 2178 | 		rc = ops->ndo_start_xmit(nskb, dev); | 
| Koki Sanagi | cf66ba5 | 2010-08-23 18:45:02 +0900 | [diff] [blame] | 2179 | 		trace_net_dev_xmit(nskb, rc); | 
| Patrick McHardy | ec634fe | 2009-07-05 19:23:38 -0700 | [diff] [blame] | 2180 | 		if (unlikely(rc != NETDEV_TX_OK)) { | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 2181 | 			if (rc & ~NETDEV_TX_MASK) | 
 | 2182 | 				goto out_kfree_gso_skb; | 
| Michael Chan | f54d9e8 | 2006-06-25 23:57:04 -0700 | [diff] [blame] | 2183 | 			nskb->next = skb->next; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2184 | 			skb->next = nskb; | 
 | 2185 | 			return rc; | 
 | 2186 | 		} | 
| Eric Dumazet | 08baf56 | 2009-05-25 22:58:01 -0700 | [diff] [blame] | 2187 | 		txq_trans_update(txq); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2188 | 		if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) | 
| Michael Chan | f54d9e8 | 2006-06-25 23:57:04 -0700 | [diff] [blame] | 2189 | 			return NETDEV_TX_BUSY; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2190 | 	} while (skb->next); | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 2191 |  | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 2192 | out_kfree_gso_skb: | 
 | 2193 | 	if (likely(skb->next == NULL)) | 
 | 2194 | 		skb->destructor = DEV_GSO_CB(skb)->destructor; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2195 | out_kfree_skb: | 
 | 2196 | 	kfree_skb(skb); | 
| Jesse Gross | 7b9c609 | 2010-10-20 13:56:04 +0000 | [diff] [blame] | 2197 | out: | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 2198 | 	return rc; | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2199 | } | 
 | 2200 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2201 | static u32 hashrnd __read_mostly; | 
| David S. Miller | b6b2fed | 2008-07-21 09:48:06 -0700 | [diff] [blame] | 2202 |  | 
| Vladislav Zolotarov | a3d22a6 | 2010-12-13 06:27:10 +0000 | [diff] [blame] | 2203 | /* | 
 | 2204 |  * Returns a Tx hash based on the given packet descriptor a Tx queues' number | 
 | 2205 |  * to be used as a distribution range. | 
 | 2206 |  */ | 
 | 2207 | u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, | 
 | 2208 | 		  unsigned int num_tx_queues) | 
| David S. Miller | 8f0f222 | 2008-07-15 03:47:03 -0700 | [diff] [blame] | 2209 | { | 
| David S. Miller | 7019298 | 2009-01-27 16:34:47 -0800 | [diff] [blame] | 2210 | 	u32 hash; | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 2211 | 	u16 qoffset = 0; | 
 | 2212 | 	u16 qcount = num_tx_queues; | 
| David S. Miller | b6b2fed | 2008-07-21 09:48:06 -0700 | [diff] [blame] | 2213 |  | 
| David S. Miller | 513de11 | 2009-05-03 14:43:10 -0700 | [diff] [blame] | 2214 | 	if (skb_rx_queue_recorded(skb)) { | 
 | 2215 | 		hash = skb_get_rx_queue(skb); | 
| Vladislav Zolotarov | a3d22a6 | 2010-12-13 06:27:10 +0000 | [diff] [blame] | 2216 | 		while (unlikely(hash >= num_tx_queues)) | 
 | 2217 | 			hash -= num_tx_queues; | 
| David S. Miller | 513de11 | 2009-05-03 14:43:10 -0700 | [diff] [blame] | 2218 | 		return hash; | 
 | 2219 | 	} | 
| Eric Dumazet | ec581f6 | 2009-05-01 09:05:06 -0700 | [diff] [blame] | 2220 |  | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 2221 | 	if (dev->num_tc) { | 
 | 2222 | 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority); | 
 | 2223 | 		qoffset = dev->tc_to_txq[tc].offset; | 
 | 2224 | 		qcount = dev->tc_to_txq[tc].count; | 
 | 2225 | 	} | 
 | 2226 |  | 
| Eric Dumazet | ec581f6 | 2009-05-01 09:05:06 -0700 | [diff] [blame] | 2227 | 	if (skb->sk && skb->sk->sk_hash) | 
| David S. Miller | 7019298 | 2009-01-27 16:34:47 -0800 | [diff] [blame] | 2228 | 		hash = skb->sk->sk_hash; | 
| Eric Dumazet | ec581f6 | 2009-05-01 09:05:06 -0700 | [diff] [blame] | 2229 | 	else | 
| Eric Dumazet | 87fd308 | 2010-07-13 05:24:20 +0000 | [diff] [blame] | 2230 | 		hash = (__force u16) skb->protocol ^ skb->rxhash; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2231 | 	hash = jhash_1word(hash, hashrnd); | 
| David S. Miller | d5a9e24 | 2009-01-27 16:22:11 -0800 | [diff] [blame] | 2232 |  | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 2233 | 	return (u16) (((u64) hash * qcount) >> 32) + qoffset; | 
| David S. Miller | 8f0f222 | 2008-07-15 03:47:03 -0700 | [diff] [blame] | 2234 | } | 
| Vladislav Zolotarov | a3d22a6 | 2010-12-13 06:27:10 +0000 | [diff] [blame] | 2235 | EXPORT_SYMBOL(__skb_tx_hash); | 
| David S. Miller | 8f0f222 | 2008-07-15 03:47:03 -0700 | [diff] [blame] | 2236 |  | 
| Eric Dumazet | ed04642 | 2009-11-13 21:54:04 +0000 | [diff] [blame] | 2237 | static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | 
 | 2238 | { | 
 | 2239 | 	if (unlikely(queue_index >= dev->real_num_tx_queues)) { | 
 | 2240 | 		if (net_ratelimit()) { | 
| Eric Dumazet | 7a161ea | 2010-04-08 21:26:13 +0000 | [diff] [blame] | 2241 | 			pr_warning("%s selects TX queue %d, but " | 
 | 2242 | 				"real number of TX queues is %d\n", | 
 | 2243 | 				dev->name, queue_index, dev->real_num_tx_queues); | 
| Eric Dumazet | ed04642 | 2009-11-13 21:54:04 +0000 | [diff] [blame] | 2244 | 		} | 
 | 2245 | 		return 0; | 
 | 2246 | 	} | 
 | 2247 | 	return queue_index; | 
 | 2248 | } | 
 | 2249 |  | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 2250 | static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) | 
 | 2251 | { | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 2252 | #ifdef CONFIG_XPS | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 2253 | 	struct xps_dev_maps *dev_maps; | 
 | 2254 | 	struct xps_map *map; | 
 | 2255 | 	int queue_index = -1; | 
 | 2256 |  | 
 | 2257 | 	rcu_read_lock(); | 
 | 2258 | 	dev_maps = rcu_dereference(dev->xps_maps); | 
 | 2259 | 	if (dev_maps) { | 
 | 2260 | 		map = rcu_dereference( | 
 | 2261 | 		    dev_maps->cpu_map[raw_smp_processor_id()]); | 
 | 2262 | 		if (map) { | 
 | 2263 | 			if (map->len == 1) | 
 | 2264 | 				queue_index = map->queues[0]; | 
 | 2265 | 			else { | 
 | 2266 | 				u32 hash; | 
 | 2267 | 				if (skb->sk && skb->sk->sk_hash) | 
 | 2268 | 					hash = skb->sk->sk_hash; | 
 | 2269 | 				else | 
 | 2270 | 					hash = (__force u16) skb->protocol ^ | 
 | 2271 | 					    skb->rxhash; | 
 | 2272 | 				hash = jhash_1word(hash, hashrnd); | 
 | 2273 | 				queue_index = map->queues[ | 
 | 2274 | 				    ((u64)hash * map->len) >> 32]; | 
 | 2275 | 			} | 
 | 2276 | 			if (unlikely(queue_index >= dev->real_num_tx_queues)) | 
 | 2277 | 				queue_index = -1; | 
 | 2278 | 		} | 
 | 2279 | 	} | 
 | 2280 | 	rcu_read_unlock(); | 
 | 2281 |  | 
 | 2282 | 	return queue_index; | 
 | 2283 | #else | 
 | 2284 | 	return -1; | 
 | 2285 | #endif | 
 | 2286 | } | 
 | 2287 |  | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2288 | static struct netdev_queue *dev_pick_tx(struct net_device *dev, | 
 | 2289 | 					struct sk_buff *skb) | 
 | 2290 | { | 
| Tom Herbert | b0f77d0 | 2010-07-14 20:50:29 -0700 | [diff] [blame] | 2291 | 	int queue_index; | 
| Helmut Schaa | deabc77 | 2010-09-03 02:39:56 +0000 | [diff] [blame] | 2292 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2293 |  | 
| Tom Herbert | 3853b58 | 2010-11-21 13:17:29 +0000 | [diff] [blame] | 2294 | 	if (dev->real_num_tx_queues == 1) | 
 | 2295 | 		queue_index = 0; | 
 | 2296 | 	else if (ops->ndo_select_queue) { | 
| Helmut Schaa | deabc77 | 2010-09-03 02:39:56 +0000 | [diff] [blame] | 2297 | 		queue_index = ops->ndo_select_queue(dev, skb); | 
 | 2298 | 		queue_index = dev_cap_txqueue(dev, queue_index); | 
 | 2299 | 	} else { | 
 | 2300 | 		struct sock *sk = skb->sk; | 
 | 2301 | 		queue_index = sk_tx_queue_get(sk); | 
| Krishna Kumar | a4ee3ce | 2009-10-19 23:50:07 +0000 | [diff] [blame] | 2302 |  | 
| Tom Herbert | 3853b58 | 2010-11-21 13:17:29 +0000 | [diff] [blame] | 2303 | 		if (queue_index < 0 || skb->ooo_okay || | 
 | 2304 | 		    queue_index >= dev->real_num_tx_queues) { | 
 | 2305 | 			int old_index = queue_index; | 
| Krishna Kumar | a4ee3ce | 2009-10-19 23:50:07 +0000 | [diff] [blame] | 2306 |  | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 2307 | 			queue_index = get_xps_queue(dev, skb); | 
 | 2308 | 			if (queue_index < 0) | 
 | 2309 | 				queue_index = skb_tx_hash(dev, skb); | 
| Tom Herbert | 3853b58 | 2010-11-21 13:17:29 +0000 | [diff] [blame] | 2310 |  | 
 | 2311 | 			if (queue_index != old_index && sk) { | 
 | 2312 | 				struct dst_entry *dst = | 
 | 2313 | 				    rcu_dereference_check(sk->sk_dst_cache, 1); | 
| Eric Dumazet | 8728c54 | 2010-04-11 21:18:17 +0000 | [diff] [blame] | 2314 |  | 
 | 2315 | 				if (dst && skb_dst(skb) == dst) | 
 | 2316 | 					sk_tx_queue_set(sk, queue_index); | 
 | 2317 | 			} | 
| Krishna Kumar | a4ee3ce | 2009-10-19 23:50:07 +0000 | [diff] [blame] | 2318 | 		} | 
 | 2319 | 	} | 
| David S. Miller | eae792b | 2008-07-15 03:03:33 -0700 | [diff] [blame] | 2320 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2321 | 	skb_set_queue_mapping(skb, queue_index); | 
 | 2322 | 	return netdev_get_tx_queue(dev, queue_index); | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2323 | } | 
 | 2324 |  | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2325 | static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | 
 | 2326 | 				 struct net_device *dev, | 
 | 2327 | 				 struct netdev_queue *txq) | 
 | 2328 | { | 
 | 2329 | 	spinlock_t *root_lock = qdisc_lock(q); | 
| Eric Dumazet | a2da570 | 2011-01-20 03:48:19 +0000 | [diff] [blame] | 2330 | 	bool contended; | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2331 | 	int rc; | 
 | 2332 |  | 
| Eric Dumazet | a2da570 | 2011-01-20 03:48:19 +0000 | [diff] [blame] | 2333 | 	qdisc_skb_cb(skb)->pkt_len = skb->len; | 
 | 2334 | 	qdisc_calculate_pkt_len(skb, q); | 
| Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 2335 | 	/* | 
 | 2336 | 	 * Heuristic to force contended enqueues to serialize on a | 
 | 2337 | 	 * separate lock before trying to get qdisc main lock. | 
 | 2338 | 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often | 
 | 2339 | 	 * and dequeue packets faster. | 
 | 2340 | 	 */ | 
| Eric Dumazet | a2da570 | 2011-01-20 03:48:19 +0000 | [diff] [blame] | 2341 | 	contended = qdisc_is_running(q); | 
| Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 2342 | 	if (unlikely(contended)) | 
 | 2343 | 		spin_lock(&q->busylock); | 
 | 2344 |  | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2345 | 	spin_lock(root_lock); | 
 | 2346 | 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { | 
 | 2347 | 		kfree_skb(skb); | 
 | 2348 | 		rc = NET_XMIT_DROP; | 
 | 2349 | 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && | 
| Eric Dumazet | bc135b2 | 2010-06-02 03:23:51 -0700 | [diff] [blame] | 2350 | 		   qdisc_run_begin(q)) { | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2351 | 		/* | 
 | 2352 | 		 * This is a work-conserving queue; there are no old skbs | 
 | 2353 | 		 * waiting to be sent out; and the qdisc is not running - | 
 | 2354 | 		 * xmit the skb directly. | 
 | 2355 | 		 */ | 
| Eric Dumazet | 7fee226 | 2010-05-11 23:19:48 +0000 | [diff] [blame] | 2356 | 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | 
 | 2357 | 			skb_dst_force(skb); | 
| Eric Dumazet | bfe0d02 | 2011-01-09 08:30:54 +0000 | [diff] [blame] | 2358 |  | 
| Eric Dumazet | bfe0d02 | 2011-01-09 08:30:54 +0000 | [diff] [blame] | 2359 | 		qdisc_bstats_update(q, skb); | 
 | 2360 |  | 
| Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 2361 | 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { | 
 | 2362 | 			if (unlikely(contended)) { | 
 | 2363 | 				spin_unlock(&q->busylock); | 
 | 2364 | 				contended = false; | 
 | 2365 | 			} | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2366 | 			__qdisc_run(q); | 
| Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 2367 | 		} else | 
| Eric Dumazet | bc135b2 | 2010-06-02 03:23:51 -0700 | [diff] [blame] | 2368 | 			qdisc_run_end(q); | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2369 |  | 
 | 2370 | 		rc = NET_XMIT_SUCCESS; | 
 | 2371 | 	} else { | 
| Eric Dumazet | 7fee226 | 2010-05-11 23:19:48 +0000 | [diff] [blame] | 2372 | 		skb_dst_force(skb); | 
| Eric Dumazet | a2da570 | 2011-01-20 03:48:19 +0000 | [diff] [blame] | 2373 | 		rc = q->enqueue(skb, q) & NET_XMIT_MASK; | 
| Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 2374 | 		if (qdisc_run_begin(q)) { | 
 | 2375 | 			if (unlikely(contended)) { | 
 | 2376 | 				spin_unlock(&q->busylock); | 
 | 2377 | 				contended = false; | 
 | 2378 | 			} | 
 | 2379 | 			__qdisc_run(q); | 
 | 2380 | 		} | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2381 | 	} | 
 | 2382 | 	spin_unlock(root_lock); | 
| Eric Dumazet | 79640a4 | 2010-06-02 05:09:29 -0700 | [diff] [blame] | 2383 | 	if (unlikely(contended)) | 
 | 2384 | 		spin_unlock(&q->busylock); | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2385 | 	return rc; | 
 | 2386 | } | 
 | 2387 |  | 
| Eric Dumazet | 745e20f | 2010-09-29 13:23:09 -0700 | [diff] [blame] | 2388 | static DEFINE_PER_CPU(int, xmit_recursion); | 
| David S. Miller | 11a766c | 2010-10-25 12:51:55 -0700 | [diff] [blame] | 2389 | #define RECURSION_LIMIT 10 | 
| Eric Dumazet | 745e20f | 2010-09-29 13:23:09 -0700 | [diff] [blame] | 2390 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 2391 | /** | 
 | 2392 |  *	dev_queue_xmit - transmit a buffer | 
 | 2393 |  *	@skb: buffer to transmit | 
 | 2394 |  * | 
 | 2395 |  *	Queue a buffer for transmission to a network device. The caller must | 
 | 2396 |  *	have set the device and priority and built the buffer before calling | 
 | 2397 |  *	this function. The function can be called from an interrupt. | 
 | 2398 |  * | 
 | 2399 |  *	A negative errno code is returned on a failure. A success does not | 
 | 2400 |  *	guarantee the frame will be transmitted as it may be dropped due | 
 | 2401 |  *	to congestion or traffic shaping. | 
 | 2402 |  * | 
 | 2403 |  * ----------------------------------------------------------------------------------- | 
 | 2404 |  *      I notice this method can also return errors from the queue disciplines, | 
 | 2405 |  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also | 
 | 2406 |  *      be positive. | 
 | 2407 |  * | 
 | 2408 |  *      Regardless of the return value, the skb is consumed, so it is currently | 
 | 2409 |  *      difficult to retry a send to this method.  (You can bump the ref count | 
 | 2410 |  *      before sending to hold a reference for retry if you are careful.) | 
 | 2411 |  * | 
 | 2412 |  *      When calling this method, interrupts MUST be enabled.  This is because | 
 | 2413 |  *      the BH enable code must have IRQs enabled so that it will not deadlock. | 
 | 2414 |  *          --BLG | 
 | 2415 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2416 | int dev_queue_xmit(struct sk_buff *skb) | 
 | 2417 | { | 
 | 2418 | 	struct net_device *dev = skb->dev; | 
| David S. Miller | dc2b484 | 2008-07-08 17:18:23 -0700 | [diff] [blame] | 2419 | 	struct netdev_queue *txq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2420 | 	struct Qdisc *q; | 
 | 2421 | 	int rc = -ENOMEM; | 
 | 2422 |  | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 2423 | 	/* Disable soft irqs for various locks below. Also | 
 | 2424 | 	 * stops preemption for RCU. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2425 | 	 */ | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 2426 | 	rcu_read_lock_bh(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2427 |  | 
| David S. Miller | eae792b | 2008-07-15 03:03:33 -0700 | [diff] [blame] | 2428 | 	txq = dev_pick_tx(dev, skb); | 
| Paul E. McKenney | a898def | 2010-02-22 17:04:49 -0800 | [diff] [blame] | 2429 | 	q = rcu_dereference_bh(txq->qdisc); | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2430 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2431 | #ifdef CONFIG_NET_CLS_ACT | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 2432 | 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2433 | #endif | 
| Koki Sanagi | cf66ba5 | 2010-08-23 18:45:02 +0900 | [diff] [blame] | 2434 | 	trace_net_dev_queue(skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2435 | 	if (q->enqueue) { | 
| Krishna Kumar | bbd8a0d | 2009-08-06 01:44:21 +0000 | [diff] [blame] | 2436 | 		rc = __dev_xmit_skb(skb, q, dev, txq); | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2437 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2438 | 	} | 
 | 2439 |  | 
 | 2440 | 	/* The device has no queue. Common case for software devices: | 
 | 2441 | 	   loopback, all the sorts of tunnels... | 
 | 2442 |  | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2443 | 	   Really, it is unlikely that netif_tx_lock protection is necessary | 
 | 2444 | 	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2445 | 	   counters.) | 
 | 2446 | 	   However, it is possible, that they rely on protection | 
 | 2447 | 	   made by us here. | 
 | 2448 |  | 
 | 2449 | 	   Check this and shot the lock. It is not prone from deadlocks. | 
 | 2450 | 	   Either shot noqueue qdisc, it is even simpler 8) | 
 | 2451 | 	 */ | 
 | 2452 | 	if (dev->flags & IFF_UP) { | 
 | 2453 | 		int cpu = smp_processor_id(); /* ok because BHs are off */ | 
 | 2454 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2455 | 		if (txq->xmit_lock_owner != cpu) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2456 |  | 
| Eric Dumazet | 745e20f | 2010-09-29 13:23:09 -0700 | [diff] [blame] | 2457 | 			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) | 
 | 2458 | 				goto recursion_alert; | 
 | 2459 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2460 | 			HARD_TX_LOCK(dev, txq, cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2461 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2462 | 			if (!netif_tx_queue_stopped(txq)) { | 
| Eric Dumazet | 745e20f | 2010-09-29 13:23:09 -0700 | [diff] [blame] | 2463 | 				__this_cpu_inc(xmit_recursion); | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 2464 | 				rc = dev_hard_start_xmit(skb, dev, txq); | 
| Eric Dumazet | 745e20f | 2010-09-29 13:23:09 -0700 | [diff] [blame] | 2465 | 				__this_cpu_dec(xmit_recursion); | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 2466 | 				if (dev_xmit_complete(rc)) { | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2467 | 					HARD_TX_UNLOCK(dev, txq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2468 | 					goto out; | 
 | 2469 | 				} | 
 | 2470 | 			} | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2471 | 			HARD_TX_UNLOCK(dev, txq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2472 | 			if (net_ratelimit()) | 
 | 2473 | 				printk(KERN_CRIT "Virtual device %s asks to " | 
 | 2474 | 				       "queue packet!\n", dev->name); | 
 | 2475 | 		} else { | 
 | 2476 | 			/* Recursion is detected! It is possible, | 
| Eric Dumazet | 745e20f | 2010-09-29 13:23:09 -0700 | [diff] [blame] | 2477 | 			 * unfortunately | 
 | 2478 | 			 */ | 
 | 2479 | recursion_alert: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2480 | 			if (net_ratelimit()) | 
 | 2481 | 				printk(KERN_CRIT "Dead loop on virtual device " | 
 | 2482 | 				       "%s, fix it urgently!\n", dev->name); | 
 | 2483 | 		} | 
 | 2484 | 	} | 
 | 2485 |  | 
 | 2486 | 	rc = -ENETDOWN; | 
| Herbert Xu | d4828d8 | 2006-06-22 02:28:18 -0700 | [diff] [blame] | 2487 | 	rcu_read_unlock_bh(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2489 | 	kfree_skb(skb); | 
 | 2490 | 	return rc; | 
 | 2491 | out: | 
| Herbert Xu | d4828d8 | 2006-06-22 02:28:18 -0700 | [diff] [blame] | 2492 | 	rcu_read_unlock_bh(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | 	return rc; | 
 | 2494 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 2495 | EXPORT_SYMBOL(dev_queue_xmit); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2496 |  | 
 | 2497 |  | 
 | 2498 | /*======================================================================= | 
 | 2499 | 			Receiver routines | 
 | 2500 |   =======================================================================*/ | 
 | 2501 |  | 
| Stephen Hemminger | 6b2bedc | 2007-03-12 14:33:50 -0700 | [diff] [blame] | 2502 | int netdev_max_backlog __read_mostly = 1000; | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 2503 | int netdev_tstamp_prequeue __read_mostly = 1; | 
| Stephen Hemminger | 6b2bedc | 2007-03-12 14:33:50 -0700 | [diff] [blame] | 2504 | int netdev_budget __read_mostly = 300; | 
 | 2505 | int weight_p __read_mostly = 64;            /* old backlog weight */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2506 |  | 
| Eric Dumazet | eecfd7c | 2010-05-06 22:07:48 -0700 | [diff] [blame] | 2507 | /* Called with irq disabled */ | 
 | 2508 | static inline void ____napi_schedule(struct softnet_data *sd, | 
 | 2509 | 				     struct napi_struct *napi) | 
 | 2510 | { | 
 | 2511 | 	list_add_tail(&napi->poll_list, &sd->poll_list); | 
 | 2512 | 	__raise_softirq_irqoff(NET_RX_SOFTIRQ); | 
 | 2513 | } | 
 | 2514 |  | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2515 | /* | 
 | 2516 |  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses | 
 | 2517 |  * and src/dst port numbers. Returns a non-zero hash number on success | 
 | 2518 |  * and 0 on failure. | 
 | 2519 |  */ | 
 | 2520 | __u32 __skb_get_rxhash(struct sk_buff *skb) | 
 | 2521 | { | 
| Changli Gao | 12fcdef | 2010-08-17 19:04:32 +0000 | [diff] [blame] | 2522 | 	int nhoff, hash = 0, poff; | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2523 | 	struct ipv6hdr *ip6; | 
 | 2524 | 	struct iphdr *ip; | 
 | 2525 | 	u8 ip_proto; | 
 | 2526 | 	u32 addr1, addr2, ihl; | 
 | 2527 | 	union { | 
 | 2528 | 		u32 v32; | 
 | 2529 | 		u16 v16[2]; | 
 | 2530 | 	} ports; | 
 | 2531 |  | 
 | 2532 | 	nhoff = skb_network_offset(skb); | 
 | 2533 |  | 
 | 2534 | 	switch (skb->protocol) { | 
 | 2535 | 	case __constant_htons(ETH_P_IP): | 
 | 2536 | 		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) | 
 | 2537 | 			goto done; | 
 | 2538 |  | 
| Changli Gao | 1003489 | 2010-08-21 06:13:28 +0000 | [diff] [blame] | 2539 | 		ip = (struct iphdr *) (skb->data + nhoff); | 
| Changli Gao | dbe5775 | 2010-08-17 19:01:38 +0000 | [diff] [blame] | 2540 | 		if (ip->frag_off & htons(IP_MF | IP_OFFSET)) | 
 | 2541 | 			ip_proto = 0; | 
 | 2542 | 		else | 
 | 2543 | 			ip_proto = ip->protocol; | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2544 | 		addr1 = (__force u32) ip->saddr; | 
 | 2545 | 		addr2 = (__force u32) ip->daddr; | 
 | 2546 | 		ihl = ip->ihl; | 
 | 2547 | 		break; | 
 | 2548 | 	case __constant_htons(ETH_P_IPV6): | 
 | 2549 | 		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) | 
 | 2550 | 			goto done; | 
 | 2551 |  | 
| Changli Gao | 1003489 | 2010-08-21 06:13:28 +0000 | [diff] [blame] | 2552 | 		ip6 = (struct ipv6hdr *) (skb->data + nhoff); | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2553 | 		ip_proto = ip6->nexthdr; | 
 | 2554 | 		addr1 = (__force u32) ip6->saddr.s6_addr32[3]; | 
 | 2555 | 		addr2 = (__force u32) ip6->daddr.s6_addr32[3]; | 
 | 2556 | 		ihl = (40 >> 2); | 
 | 2557 | 		break; | 
 | 2558 | 	default: | 
 | 2559 | 		goto done; | 
 | 2560 | 	} | 
 | 2561 |  | 
| Changli Gao | 12fcdef | 2010-08-17 19:04:32 +0000 | [diff] [blame] | 2562 | 	ports.v32 = 0; | 
 | 2563 | 	poff = proto_ports_offset(ip_proto); | 
 | 2564 | 	if (poff >= 0) { | 
 | 2565 | 		nhoff += ihl * 4 + poff; | 
 | 2566 | 		if (pskb_may_pull(skb, nhoff + 4)) { | 
 | 2567 | 			ports.v32 = * (__force u32 *) (skb->data + nhoff); | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2568 | 			if (ports.v16[1] < ports.v16[0]) | 
 | 2569 | 				swap(ports.v16[0], ports.v16[1]); | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2570 | 		} | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2571 | 	} | 
 | 2572 |  | 
 | 2573 | 	/* get a consistent hash (same value on both flow directions) */ | 
 | 2574 | 	if (addr2 < addr1) | 
 | 2575 | 		swap(addr1, addr2); | 
 | 2576 |  | 
 | 2577 | 	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd); | 
 | 2578 | 	if (!hash) | 
 | 2579 | 		hash = 1; | 
 | 2580 |  | 
 | 2581 | done: | 
 | 2582 | 	return hash; | 
 | 2583 | } | 
 | 2584 | EXPORT_SYMBOL(__skb_get_rxhash); | 
 | 2585 |  | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 2586 | #ifdef CONFIG_RPS | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2587 |  | 
 | 2588 | /* One global table that all flow-based protocols share. */ | 
| Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 2589 | struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2590 | EXPORT_SYMBOL(rps_sock_flow_table); | 
 | 2591 |  | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 2592 | static struct rps_dev_flow * | 
 | 2593 | set_rps_cpu(struct net_device *dev, struct sk_buff *skb, | 
 | 2594 | 	    struct rps_dev_flow *rflow, u16 next_cpu) | 
 | 2595 | { | 
 | 2596 | 	u16 tcpu; | 
 | 2597 |  | 
 | 2598 | 	tcpu = rflow->cpu = next_cpu; | 
 | 2599 | 	if (tcpu != RPS_NO_CPU) { | 
 | 2600 | #ifdef CONFIG_RFS_ACCEL | 
 | 2601 | 		struct netdev_rx_queue *rxqueue; | 
 | 2602 | 		struct rps_dev_flow_table *flow_table; | 
 | 2603 | 		struct rps_dev_flow *old_rflow; | 
 | 2604 | 		u32 flow_id; | 
 | 2605 | 		u16 rxq_index; | 
 | 2606 | 		int rc; | 
 | 2607 |  | 
 | 2608 | 		/* Should we steer this flow to a different hardware queue? */ | 
 | 2609 | 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap) | 
 | 2610 | 			goto out; | 
 | 2611 | 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu); | 
 | 2612 | 		if (rxq_index == skb_get_rx_queue(skb)) | 
 | 2613 | 			goto out; | 
 | 2614 |  | 
 | 2615 | 		rxqueue = dev->_rx + rxq_index; | 
 | 2616 | 		flow_table = rcu_dereference(rxqueue->rps_flow_table); | 
 | 2617 | 		if (!flow_table) | 
 | 2618 | 			goto out; | 
 | 2619 | 		flow_id = skb->rxhash & flow_table->mask; | 
 | 2620 | 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, | 
 | 2621 | 							rxq_index, flow_id); | 
 | 2622 | 		if (rc < 0) | 
 | 2623 | 			goto out; | 
 | 2624 | 		old_rflow = rflow; | 
 | 2625 | 		rflow = &flow_table->flows[flow_id]; | 
 | 2626 | 		rflow->cpu = next_cpu; | 
 | 2627 | 		rflow->filter = rc; | 
 | 2628 | 		if (old_rflow->filter == rflow->filter) | 
 | 2629 | 			old_rflow->filter = RPS_NO_FILTER; | 
 | 2630 | 	out: | 
 | 2631 | #endif | 
 | 2632 | 		rflow->last_qtail = | 
 | 2633 | 			per_cpu(softnet_data, tcpu).input_queue_head; | 
 | 2634 | 	} | 
 | 2635 |  | 
 | 2636 | 	return rflow; | 
 | 2637 | } | 
 | 2638 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2639 | /* | 
 | 2640 |  * get_rps_cpu is called from netif_receive_skb and returns the target | 
 | 2641 |  * CPU from the RPS map of the receiving queue for a given skb. | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2642 |  * rcu_read_lock must be held on entry. | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2643 |  */ | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2644 | static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | 
 | 2645 | 		       struct rps_dev_flow **rflowp) | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2646 | { | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2647 | 	struct netdev_rx_queue *rxqueue; | 
| Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 2648 | 	struct rps_map *map; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2649 | 	struct rps_dev_flow_table *flow_table; | 
 | 2650 | 	struct rps_sock_flow_table *sock_flow_table; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2651 | 	int cpu = -1; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2652 | 	u16 tcpu; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2653 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2654 | 	if (skb_rx_queue_recorded(skb)) { | 
 | 2655 | 		u16 index = skb_get_rx_queue(skb); | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 2656 | 		if (unlikely(index >= dev->real_num_rx_queues)) { | 
 | 2657 | 			WARN_ONCE(dev->real_num_rx_queues > 1, | 
 | 2658 | 				  "%s received packet on queue %u, but number " | 
 | 2659 | 				  "of RX queues is %u\n", | 
 | 2660 | 				  dev->name, index, dev->real_num_rx_queues); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2661 | 			goto done; | 
 | 2662 | 		} | 
 | 2663 | 		rxqueue = dev->_rx + index; | 
 | 2664 | 	} else | 
 | 2665 | 		rxqueue = dev->_rx; | 
 | 2666 |  | 
| Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 2667 | 	map = rcu_dereference(rxqueue->rps_map); | 
 | 2668 | 	if (map) { | 
| Tom Herbert | 8587523 | 2011-01-31 16:23:42 -0800 | [diff] [blame] | 2669 | 		if (map->len == 1 && | 
 | 2670 | 		    !rcu_dereference_raw(rxqueue->rps_flow_table)) { | 
| Changli Gao | 6febfca | 2010-09-03 23:12:37 +0000 | [diff] [blame] | 2671 | 			tcpu = map->cpus[0]; | 
 | 2672 | 			if (cpu_online(tcpu)) | 
 | 2673 | 				cpu = tcpu; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2674 | 			goto done; | 
| Eric Dumazet | b249dcb | 2010-04-19 21:56:38 +0000 | [diff] [blame] | 2675 | 		} | 
| Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 2676 | 	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) { | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2677 | 		goto done; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2678 | 	} | 
 | 2679 |  | 
| Changli Gao | 2d47b45 | 2010-08-17 19:00:56 +0000 | [diff] [blame] | 2680 | 	skb_reset_network_header(skb); | 
| Krishna Kumar | bfb564e | 2010-08-04 06:15:52 +0000 | [diff] [blame] | 2681 | 	if (!skb_get_rxhash(skb)) | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2682 | 		goto done; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2683 |  | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2684 | 	flow_table = rcu_dereference(rxqueue->rps_flow_table); | 
 | 2685 | 	sock_flow_table = rcu_dereference(rps_sock_flow_table); | 
 | 2686 | 	if (flow_table && sock_flow_table) { | 
 | 2687 | 		u16 next_cpu; | 
 | 2688 | 		struct rps_dev_flow *rflow; | 
 | 2689 |  | 
 | 2690 | 		rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; | 
 | 2691 | 		tcpu = rflow->cpu; | 
 | 2692 |  | 
 | 2693 | 		next_cpu = sock_flow_table->ents[skb->rxhash & | 
 | 2694 | 		    sock_flow_table->mask]; | 
 | 2695 |  | 
 | 2696 | 		/* | 
 | 2697 | 		 * If the desired CPU (where last recvmsg was done) is | 
 | 2698 | 		 * different from current CPU (one in the rx-queue flow | 
 | 2699 | 		 * table entry), switch if one of the following holds: | 
 | 2700 | 		 *   - Current CPU is unset (equal to RPS_NO_CPU). | 
 | 2701 | 		 *   - Current CPU is offline. | 
 | 2702 | 		 *   - The current CPU's queue tail has advanced beyond the | 
 | 2703 | 		 *     last packet that was enqueued using this table entry. | 
 | 2704 | 		 *     This guarantees that all previous packets for the flow | 
 | 2705 | 		 *     have been dequeued, thus preserving in order delivery. | 
 | 2706 | 		 */ | 
 | 2707 | 		if (unlikely(tcpu != next_cpu) && | 
 | 2708 | 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || | 
 | 2709 | 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head - | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 2710 | 		      rflow->last_qtail)) >= 0)) | 
 | 2711 | 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu); | 
 | 2712 |  | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2713 | 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { | 
 | 2714 | 			*rflowp = rflow; | 
 | 2715 | 			cpu = tcpu; | 
 | 2716 | 			goto done; | 
 | 2717 | 		} | 
 | 2718 | 	} | 
 | 2719 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2720 | 	if (map) { | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2721 | 		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2722 |  | 
 | 2723 | 		if (cpu_online(tcpu)) { | 
 | 2724 | 			cpu = tcpu; | 
 | 2725 | 			goto done; | 
 | 2726 | 		} | 
 | 2727 | 	} | 
 | 2728 |  | 
 | 2729 | done: | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2730 | 	return cpu; | 
 | 2731 | } | 
 | 2732 |  | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 2733 | #ifdef CONFIG_RFS_ACCEL | 
 | 2734 |  | 
 | 2735 | /** | 
 | 2736 |  * rps_may_expire_flow - check whether an RFS hardware filter may be removed | 
 | 2737 |  * @dev: Device on which the filter was set | 
 | 2738 |  * @rxq_index: RX queue index | 
 | 2739 |  * @flow_id: Flow ID passed to ndo_rx_flow_steer() | 
 | 2740 |  * @filter_id: Filter ID returned by ndo_rx_flow_steer() | 
 | 2741 |  * | 
 | 2742 |  * Drivers that implement ndo_rx_flow_steer() should periodically call | 
 | 2743 |  * this function for each installed filter and remove the filters for | 
 | 2744 |  * which it returns %true. | 
 | 2745 |  */ | 
 | 2746 | bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, | 
 | 2747 | 			 u32 flow_id, u16 filter_id) | 
 | 2748 | { | 
 | 2749 | 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index; | 
 | 2750 | 	struct rps_dev_flow_table *flow_table; | 
 | 2751 | 	struct rps_dev_flow *rflow; | 
 | 2752 | 	bool expire = true; | 
 | 2753 | 	int cpu; | 
 | 2754 |  | 
 | 2755 | 	rcu_read_lock(); | 
 | 2756 | 	flow_table = rcu_dereference(rxqueue->rps_flow_table); | 
 | 2757 | 	if (flow_table && flow_id <= flow_table->mask) { | 
 | 2758 | 		rflow = &flow_table->flows[flow_id]; | 
 | 2759 | 		cpu = ACCESS_ONCE(rflow->cpu); | 
 | 2760 | 		if (rflow->filter == filter_id && cpu != RPS_NO_CPU && | 
 | 2761 | 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head - | 
 | 2762 | 			   rflow->last_qtail) < | 
 | 2763 | 		     (int)(10 * flow_table->mask))) | 
 | 2764 | 			expire = false; | 
 | 2765 | 	} | 
 | 2766 | 	rcu_read_unlock(); | 
 | 2767 | 	return expire; | 
 | 2768 | } | 
 | 2769 | EXPORT_SYMBOL(rps_may_expire_flow); | 
 | 2770 |  | 
 | 2771 | #endif /* CONFIG_RFS_ACCEL */ | 
 | 2772 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2773 | /* Called from hardirq (IPI) context */ | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2774 | static void rps_trigger_softirq(void *data) | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2775 | { | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2776 | 	struct softnet_data *sd = data; | 
 | 2777 |  | 
| Eric Dumazet | eecfd7c | 2010-05-06 22:07:48 -0700 | [diff] [blame] | 2778 | 	____napi_schedule(sd, &sd->backlog); | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 2779 | 	sd->received_rps++; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2780 | } | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2781 |  | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2782 | #endif /* CONFIG_RPS */ | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2783 |  | 
 | 2784 | /* | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2785 |  * Check if this softnet_data structure is another cpu one | 
 | 2786 |  * If yes, queue it to our IPI list and return 1 | 
 | 2787 |  * If no, return 0 | 
 | 2788 |  */ | 
 | 2789 | static int rps_ipi_queued(struct softnet_data *sd) | 
 | 2790 | { | 
 | 2791 | #ifdef CONFIG_RPS | 
 | 2792 | 	struct softnet_data *mysd = &__get_cpu_var(softnet_data); | 
 | 2793 |  | 
 | 2794 | 	if (sd != mysd) { | 
 | 2795 | 		sd->rps_ipi_next = mysd->rps_ipi_list; | 
 | 2796 | 		mysd->rps_ipi_list = sd; | 
 | 2797 |  | 
 | 2798 | 		__raise_softirq_irqoff(NET_RX_SOFTIRQ); | 
 | 2799 | 		return 1; | 
 | 2800 | 	} | 
 | 2801 | #endif /* CONFIG_RPS */ | 
 | 2802 | 	return 0; | 
 | 2803 | } | 
 | 2804 |  | 
 | 2805 | /* | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2806 |  * enqueue_to_backlog is called to queue an skb to a per CPU backlog | 
 | 2807 |  * queue (may be a remote CPU queue). | 
 | 2808 |  */ | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2809 | static int enqueue_to_backlog(struct sk_buff *skb, int cpu, | 
 | 2810 | 			      unsigned int *qtail) | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2811 | { | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2812 | 	struct softnet_data *sd; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2813 | 	unsigned long flags; | 
 | 2814 |  | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2815 | 	sd = &per_cpu(softnet_data, cpu); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2816 |  | 
 | 2817 | 	local_irq_save(flags); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2818 |  | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2819 | 	rps_lock(sd); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 2820 | 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { | 
 | 2821 | 		if (skb_queue_len(&sd->input_pkt_queue)) { | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2822 | enqueue: | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2823 | 			__skb_queue_tail(&sd->input_pkt_queue, skb); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 2824 | 			input_queue_tail_incr_save(sd, qtail); | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2825 | 			rps_unlock(sd); | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 2826 | 			local_irq_restore(flags); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2827 | 			return NET_RX_SUCCESS; | 
 | 2828 | 		} | 
 | 2829 |  | 
| Eric Dumazet | ebda37c2 | 2010-05-06 23:51:21 +0000 | [diff] [blame] | 2830 | 		/* Schedule NAPI for backlog device | 
 | 2831 | 		 * We can use non atomic operation since we own the queue lock | 
 | 2832 | 		 */ | 
 | 2833 | 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2834 | 			if (!rps_ipi_queued(sd)) | 
| Eric Dumazet | eecfd7c | 2010-05-06 22:07:48 -0700 | [diff] [blame] | 2835 | 				____napi_schedule(sd, &sd->backlog); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2836 | 		} | 
 | 2837 | 		goto enqueue; | 
 | 2838 | 	} | 
 | 2839 |  | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 2840 | 	sd->dropped++; | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 2841 | 	rps_unlock(sd); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2842 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2843 | 	local_irq_restore(flags); | 
 | 2844 |  | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 2845 | 	atomic_long_inc(&skb->dev->rx_dropped); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 2846 | 	kfree_skb(skb); | 
 | 2847 | 	return NET_RX_DROP; | 
 | 2848 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2849 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2850 | /** | 
 | 2851 |  *	netif_rx	-	post buffer to the network code | 
 | 2852 |  *	@skb: buffer to post | 
 | 2853 |  * | 
 | 2854 |  *	This function receives a packet from a device driver and queues it for | 
 | 2855 |  *	the upper (protocol) levels to process.  It always succeeds. The buffer | 
 | 2856 |  *	may be dropped during processing for congestion control or by the | 
 | 2857 |  *	protocol layers. | 
 | 2858 |  * | 
 | 2859 |  *	return values: | 
 | 2860 |  *	NET_RX_SUCCESS	(no congestion) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2861 |  *	NET_RX_DROP     (packet was dropped) | 
 | 2862 |  * | 
 | 2863 |  */ | 
 | 2864 |  | 
 | 2865 | int netif_rx(struct sk_buff *skb) | 
 | 2866 | { | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2867 | 	int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2868 |  | 
 | 2869 | 	/* if netpoll wants it, pretend we never saw it */ | 
 | 2870 | 	if (netpoll_rx(skb)) | 
 | 2871 | 		return NET_RX_DROP; | 
 | 2872 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 2873 | 	if (netdev_tstamp_prequeue) | 
 | 2874 | 		net_timestamp_check(skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2875 |  | 
| Koki Sanagi | cf66ba5 | 2010-08-23 18:45:02 +0900 | [diff] [blame] | 2876 | 	trace_netif_rx(skb); | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 2877 | #ifdef CONFIG_RPS | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2878 | 	{ | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2879 | 		struct rps_dev_flow voidflow, *rflow = &voidflow; | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2880 | 		int cpu; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2881 |  | 
| Changli Gao | cece194 | 2010-08-07 20:35:43 -0700 | [diff] [blame] | 2882 | 		preempt_disable(); | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2883 | 		rcu_read_lock(); | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2884 |  | 
 | 2885 | 		cpu = get_rps_cpu(skb->dev, skb, &rflow); | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2886 | 		if (cpu < 0) | 
 | 2887 | 			cpu = smp_processor_id(); | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2888 |  | 
 | 2889 | 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | 
 | 2890 |  | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2891 | 		rcu_read_unlock(); | 
| Changli Gao | cece194 | 2010-08-07 20:35:43 -0700 | [diff] [blame] | 2892 | 		preempt_enable(); | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2893 | 	} | 
 | 2894 | #else | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 2895 | 	{ | 
 | 2896 | 		unsigned int qtail; | 
 | 2897 | 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail); | 
 | 2898 | 		put_cpu(); | 
 | 2899 | 	} | 
| Eric Dumazet | b0e28f1 | 2010-04-15 00:14:07 -0700 | [diff] [blame] | 2900 | #endif | 
 | 2901 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2902 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 2903 | EXPORT_SYMBOL(netif_rx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2904 |  | 
 | 2905 | int netif_rx_ni(struct sk_buff *skb) | 
 | 2906 | { | 
 | 2907 | 	int err; | 
 | 2908 |  | 
 | 2909 | 	preempt_disable(); | 
 | 2910 | 	err = netif_rx(skb); | 
 | 2911 | 	if (local_softirq_pending()) | 
 | 2912 | 		do_softirq(); | 
 | 2913 | 	preempt_enable(); | 
 | 2914 |  | 
 | 2915 | 	return err; | 
 | 2916 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2917 | EXPORT_SYMBOL(netif_rx_ni); | 
 | 2918 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2919 | static void net_tx_action(struct softirq_action *h) | 
 | 2920 | { | 
 | 2921 | 	struct softnet_data *sd = &__get_cpu_var(softnet_data); | 
 | 2922 |  | 
 | 2923 | 	if (sd->completion_queue) { | 
 | 2924 | 		struct sk_buff *clist; | 
 | 2925 |  | 
 | 2926 | 		local_irq_disable(); | 
 | 2927 | 		clist = sd->completion_queue; | 
 | 2928 | 		sd->completion_queue = NULL; | 
 | 2929 | 		local_irq_enable(); | 
 | 2930 |  | 
 | 2931 | 		while (clist) { | 
 | 2932 | 			struct sk_buff *skb = clist; | 
 | 2933 | 			clist = clist->next; | 
 | 2934 |  | 
| Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2935 | 			WARN_ON(atomic_read(&skb->users)); | 
| Koki Sanagi | 07dc22e | 2010-08-23 18:46:12 +0900 | [diff] [blame] | 2936 | 			trace_kfree_skb(skb, net_tx_action); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2937 | 			__kfree_skb(skb); | 
 | 2938 | 		} | 
 | 2939 | 	} | 
 | 2940 |  | 
 | 2941 | 	if (sd->output_queue) { | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2942 | 		struct Qdisc *head; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2943 |  | 
 | 2944 | 		local_irq_disable(); | 
 | 2945 | 		head = sd->output_queue; | 
 | 2946 | 		sd->output_queue = NULL; | 
| Changli Gao | a9cbd58 | 2010-04-26 23:06:24 +0000 | [diff] [blame] | 2947 | 		sd->output_queue_tailp = &sd->output_queue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2948 | 		local_irq_enable(); | 
 | 2949 |  | 
 | 2950 | 		while (head) { | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2951 | 			struct Qdisc *q = head; | 
 | 2952 | 			spinlock_t *root_lock; | 
 | 2953 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2954 | 			head = head->next_sched; | 
 | 2955 |  | 
| David S. Miller | 5fb6622 | 2008-08-02 20:02:43 -0700 | [diff] [blame] | 2956 | 			root_lock = qdisc_lock(q); | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2957 | 			if (spin_trylock(root_lock)) { | 
| Jarek Poplawski | def82a1 | 2008-08-17 21:54:43 -0700 | [diff] [blame] | 2958 | 				smp_mb__before_clear_bit(); | 
 | 2959 | 				clear_bit(__QDISC_STATE_SCHED, | 
 | 2960 | 					  &q->state); | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2961 | 				qdisc_run(q); | 
 | 2962 | 				spin_unlock(root_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2963 | 			} else { | 
| David S. Miller | 195648b | 2008-08-19 04:00:36 -0700 | [diff] [blame] | 2964 | 				if (!test_bit(__QDISC_STATE_DEACTIVATED, | 
| Jarek Poplawski | e8a83e1 | 2008-09-07 18:41:21 -0700 | [diff] [blame] | 2965 | 					      &q->state)) { | 
| David S. Miller | 195648b | 2008-08-19 04:00:36 -0700 | [diff] [blame] | 2966 | 					__netif_reschedule(q); | 
| Jarek Poplawski | e8a83e1 | 2008-09-07 18:41:21 -0700 | [diff] [blame] | 2967 | 				} else { | 
 | 2968 | 					smp_mb__before_clear_bit(); | 
 | 2969 | 					clear_bit(__QDISC_STATE_SCHED, | 
 | 2970 | 						  &q->state); | 
 | 2971 | 				} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2972 | 			} | 
 | 2973 | 		} | 
 | 2974 | 	} | 
 | 2975 | } | 
 | 2976 |  | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 2977 | #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ | 
 | 2978 |     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) | 
| Michał Mirosław | da67829 | 2009-06-05 05:35:28 +0000 | [diff] [blame] | 2979 | /* This hook is defined here for ATM LANE */ | 
 | 2980 | int (*br_fdb_test_addr_hook)(struct net_device *dev, | 
 | 2981 | 			     unsigned char *addr) __read_mostly; | 
| Stephen Hemminger | 4fb019a | 2009-09-11 11:50:08 -0700 | [diff] [blame] | 2982 | EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); | 
| Michał Mirosław | da67829 | 2009-06-05 05:35:28 +0000 | [diff] [blame] | 2983 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2984 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2985 | #ifdef CONFIG_NET_CLS_ACT | 
 | 2986 | /* TODO: Maybe we should just force sch_ingress to be compiled in | 
 | 2987 |  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions | 
 | 2988 |  * a compare and 2 stores extra right now if we dont have it on | 
 | 2989 |  * but have CONFIG_NET_CLS_ACT | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 2990 |  * NOTE: This doesnt stop any functionality; if you dont have | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2991 |  * the ingress scheduler, you just cant add policies on ingress. | 
 | 2992 |  * | 
 | 2993 |  */ | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 2994 | static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2995 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 | 	struct net_device *dev = skb->dev; | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 2997 | 	u32 ttl = G_TC_RTTL(skb->tc_verd); | 
| David S. Miller | 555353c | 2008-07-08 17:33:13 -0700 | [diff] [blame] | 2998 | 	int result = TC_ACT_OK; | 
 | 2999 | 	struct Qdisc *q; | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 3000 |  | 
| Stephen Hemminger | de38483 | 2010-08-01 00:33:23 -0700 | [diff] [blame] | 3001 | 	if (unlikely(MAX_RED_LOOP < ttl++)) { | 
 | 3002 | 		if (net_ratelimit()) | 
 | 3003 | 			pr_warning( "Redir loop detected Dropping packet (%d->%d)\n", | 
 | 3004 | 			       skb->skb_iif, dev->ifindex); | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3005 | 		return TC_ACT_SHOT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3006 | 	} | 
 | 3007 |  | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3008 | 	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); | 
 | 3009 | 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); | 
 | 3010 |  | 
| David S. Miller | 8387400 | 2008-07-17 00:53:03 -0700 | [diff] [blame] | 3011 | 	q = rxq->qdisc; | 
| David S. Miller | 8d50b53 | 2008-07-30 02:37:46 -0700 | [diff] [blame] | 3012 | 	if (q != &noop_qdisc) { | 
| David S. Miller | 8387400 | 2008-07-17 00:53:03 -0700 | [diff] [blame] | 3013 | 		spin_lock(qdisc_lock(q)); | 
| David S. Miller | a9312ae | 2008-08-17 21:51:03 -0700 | [diff] [blame] | 3014 | 		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) | 
 | 3015 | 			result = qdisc_enqueue_root(skb, q); | 
| David S. Miller | 8387400 | 2008-07-17 00:53:03 -0700 | [diff] [blame] | 3016 | 		spin_unlock(qdisc_lock(q)); | 
 | 3017 | 	} | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3018 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3019 | 	return result; | 
 | 3020 | } | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3021 |  | 
 | 3022 | static inline struct sk_buff *handle_ing(struct sk_buff *skb, | 
 | 3023 | 					 struct packet_type **pt_prev, | 
 | 3024 | 					 int *ret, struct net_device *orig_dev) | 
 | 3025 | { | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 3026 | 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); | 
 | 3027 |  | 
 | 3028 | 	if (!rxq || rxq->qdisc == &noop_qdisc) | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3029 | 		goto out; | 
 | 3030 |  | 
 | 3031 | 	if (*pt_prev) { | 
 | 3032 | 		*ret = deliver_skb(skb, *pt_prev, orig_dev); | 
 | 3033 | 		*pt_prev = NULL; | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3034 | 	} | 
 | 3035 |  | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 3036 | 	switch (ing_filter(skb, rxq)) { | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3037 | 	case TC_ACT_SHOT: | 
 | 3038 | 	case TC_ACT_STOLEN: | 
 | 3039 | 		kfree_skb(skb); | 
 | 3040 | 		return NULL; | 
 | 3041 | 	} | 
 | 3042 |  | 
 | 3043 | out: | 
 | 3044 | 	skb->tc_verd = 0; | 
 | 3045 | 	return skb; | 
 | 3046 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3047 | #endif | 
 | 3048 |  | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3049 | /** | 
 | 3050 |  *	netdev_rx_handler_register - register receive handler | 
 | 3051 |  *	@dev: device to register a handler for | 
 | 3052 |  *	@rx_handler: receive handler to register | 
| Jiri Pirko | 93e2c32 | 2010-06-10 03:34:59 +0000 | [diff] [blame] | 3053 |  *	@rx_handler_data: data pointer that is used by rx handler | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3054 |  * | 
 | 3055 |  *	Register a receive hander for a device. This handler will then be | 
 | 3056 |  *	called from __netif_receive_skb. A negative errno code is returned | 
 | 3057 |  *	on a failure. | 
 | 3058 |  * | 
 | 3059 |  *	The caller must hold the rtnl_mutex. | 
 | 3060 |  */ | 
 | 3061 | int netdev_rx_handler_register(struct net_device *dev, | 
| Jiri Pirko | 93e2c32 | 2010-06-10 03:34:59 +0000 | [diff] [blame] | 3062 | 			       rx_handler_func_t *rx_handler, | 
 | 3063 | 			       void *rx_handler_data) | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3064 | { | 
 | 3065 | 	ASSERT_RTNL(); | 
 | 3066 |  | 
 | 3067 | 	if (dev->rx_handler) | 
 | 3068 | 		return -EBUSY; | 
 | 3069 |  | 
| Jiri Pirko | 93e2c32 | 2010-06-10 03:34:59 +0000 | [diff] [blame] | 3070 | 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3071 | 	rcu_assign_pointer(dev->rx_handler, rx_handler); | 
 | 3072 |  | 
 | 3073 | 	return 0; | 
 | 3074 | } | 
 | 3075 | EXPORT_SYMBOL_GPL(netdev_rx_handler_register); | 
 | 3076 |  | 
 | 3077 | /** | 
 | 3078 |  *	netdev_rx_handler_unregister - unregister receive handler | 
 | 3079 |  *	@dev: device to unregister a handler from | 
 | 3080 |  * | 
 | 3081 |  *	Unregister a receive hander from a device. | 
 | 3082 |  * | 
 | 3083 |  *	The caller must hold the rtnl_mutex. | 
 | 3084 |  */ | 
 | 3085 | void netdev_rx_handler_unregister(struct net_device *dev) | 
 | 3086 | { | 
 | 3087 |  | 
 | 3088 | 	ASSERT_RTNL(); | 
 | 3089 | 	rcu_assign_pointer(dev->rx_handler, NULL); | 
| Jiri Pirko | 93e2c32 | 2010-06-10 03:34:59 +0000 | [diff] [blame] | 3090 | 	rcu_assign_pointer(dev->rx_handler_data, NULL); | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3091 | } | 
 | 3092 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); | 
 | 3093 |  | 
| Eric Dumazet | acbbc07 | 2010-04-11 06:56:11 +0000 | [diff] [blame] | 3094 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, | 
 | 3095 | 					      struct net_device *master) | 
 | 3096 | { | 
 | 3097 | 	if (skb->pkt_type == PACKET_HOST) { | 
 | 3098 | 		u16 *dest = (u16 *) eth_hdr(skb)->h_dest; | 
 | 3099 |  | 
 | 3100 | 		memcpy(dest, master->dev_addr, ETH_ALEN); | 
 | 3101 | 	} | 
 | 3102 | } | 
 | 3103 |  | 
 | 3104 | /* On bonding slaves other than the currently active slave, suppress | 
 | 3105 |  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and | 
 | 3106 |  * ARP on active-backup slaves with arp_validate enabled. | 
 | 3107 |  */ | 
| Jiri Pirko | d59cfde | 2011-02-12 00:46:06 +0000 | [diff] [blame] | 3108 | static int __skb_bond_should_drop(struct sk_buff *skb, | 
 | 3109 | 				  struct net_device *master) | 
| Eric Dumazet | acbbc07 | 2010-04-11 06:56:11 +0000 | [diff] [blame] | 3110 | { | 
 | 3111 | 	struct net_device *dev = skb->dev; | 
 | 3112 |  | 
 | 3113 | 	if (master->priv_flags & IFF_MASTER_ARPMON) | 
 | 3114 | 		dev->last_rx = jiffies; | 
 | 3115 |  | 
| Jiri Pirko | f350a0a8 | 2010-06-15 06:50:45 +0000 | [diff] [blame] | 3116 | 	if ((master->priv_flags & IFF_MASTER_ALB) && | 
 | 3117 | 	    (master->priv_flags & IFF_BRIDGE_PORT)) { | 
| Eric Dumazet | acbbc07 | 2010-04-11 06:56:11 +0000 | [diff] [blame] | 3118 | 		/* Do address unmangle. The local destination address | 
 | 3119 | 		 * will be always the one master has. Provides the right | 
 | 3120 | 		 * functionality in a bridge. | 
 | 3121 | 		 */ | 
 | 3122 | 		skb_bond_set_mac_by_master(skb, master); | 
 | 3123 | 	} | 
 | 3124 |  | 
 | 3125 | 	if (dev->priv_flags & IFF_SLAVE_INACTIVE) { | 
 | 3126 | 		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && | 
 | 3127 | 		    skb->protocol == __cpu_to_be16(ETH_P_ARP)) | 
 | 3128 | 			return 0; | 
 | 3129 |  | 
 | 3130 | 		if (master->priv_flags & IFF_MASTER_ALB) { | 
 | 3131 | 			if (skb->pkt_type != PACKET_BROADCAST && | 
 | 3132 | 			    skb->pkt_type != PACKET_MULTICAST) | 
 | 3133 | 				return 0; | 
 | 3134 | 		} | 
 | 3135 | 		if (master->priv_flags & IFF_MASTER_8023AD && | 
 | 3136 | 		    skb->protocol == __cpu_to_be16(ETH_P_SLOW)) | 
 | 3137 | 			return 0; | 
 | 3138 |  | 
 | 3139 | 		return 1; | 
 | 3140 | 	} | 
 | 3141 | 	return 0; | 
 | 3142 | } | 
| Eric Dumazet | acbbc07 | 2010-04-11 06:56:11 +0000 | [diff] [blame] | 3143 |  | 
| Eric Dumazet | 10f744d | 2010-03-28 23:07:20 -0700 | [diff] [blame] | 3144 | static int __netif_receive_skb(struct sk_buff *skb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3145 | { | 
 | 3146 | 	struct packet_type *ptype, *pt_prev; | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3147 | 	rx_handler_func_t *rx_handler; | 
| David S. Miller | f2ccd8f | 2005-08-09 19:34:12 -0700 | [diff] [blame] | 3148 | 	struct net_device *orig_dev; | 
| Joe Eykholt | 0d7a368 | 2008-07-02 18:22:01 -0700 | [diff] [blame] | 3149 | 	struct net_device *null_or_orig; | 
| John Fastabend | 2df4a0f | 2010-05-12 21:31:11 +0000 | [diff] [blame] | 3150 | 	struct net_device *orig_or_bond; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3151 | 	int ret = NET_RX_DROP; | 
| Al Viro | 252e334 | 2006-11-14 20:48:11 -0800 | [diff] [blame] | 3152 | 	__be16 type; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3153 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 3154 | 	if (!netdev_tstamp_prequeue) | 
 | 3155 | 		net_timestamp_check(skb); | 
| Eric Dumazet | 81bbb3d | 2009-09-30 16:42:42 -0700 | [diff] [blame] | 3156 |  | 
| Koki Sanagi | cf66ba5 | 2010-08-23 18:45:02 +0900 | [diff] [blame] | 3157 | 	trace_netif_receive_skb(skb); | 
| Patrick McHardy | 9b22ea5 | 2008-11-04 14:49:57 -0800 | [diff] [blame] | 3158 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3159 | 	/* if we've gotten here through NAPI, check netpoll */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3160 | 	if (netpoll_receive_skb(skb)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3161 | 		return NET_RX_DROP; | 
 | 3162 |  | 
| Eric Dumazet | 8964be4 | 2009-11-20 15:35:04 -0800 | [diff] [blame] | 3163 | 	if (!skb->skb_iif) | 
 | 3164 | 		skb->skb_iif = skb->dev->ifindex; | 
| David S. Miller | 86e65da | 2005-08-09 19:36:29 -0700 | [diff] [blame] | 3165 |  | 
| John Fastabend | 597a264 | 2010-06-03 09:30:11 +0000 | [diff] [blame] | 3166 | 	/* | 
 | 3167 | 	 * bonding note: skbs received on inactive slaves should only | 
 | 3168 | 	 * be delivered to pkt handlers that are exact matches.  Also | 
 | 3169 | 	 * the deliver_no_wcard flag will be set.  If packet handlers | 
 | 3170 | 	 * are sensitive to duplicate packets these skbs will need to | 
| Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 3171 | 	 * be dropped at the handler. | 
| John Fastabend | 597a264 | 2010-06-03 09:30:11 +0000 | [diff] [blame] | 3172 | 	 */ | 
| Joe Eykholt | 0d7a368 | 2008-07-02 18:22:01 -0700 | [diff] [blame] | 3173 | 	null_or_orig = NULL; | 
| Joe Eykholt | cc9bd5c | 2008-07-02 18:22:00 -0700 | [diff] [blame] | 3174 | 	orig_dev = skb->dev; | 
| John Fastabend | 597a264 | 2010-06-03 09:30:11 +0000 | [diff] [blame] | 3175 | 	if (skb->deliver_no_wcard) | 
 | 3176 | 		null_or_orig = orig_dev; | 
| Jiri Pirko | 1765a57 | 2011-02-12 06:48:36 +0000 | [diff] [blame^] | 3177 | 	else if (netif_is_bond_slave(orig_dev)) { | 
 | 3178 | 		struct net_device *bond_master = ACCESS_ONCE(orig_dev->master); | 
 | 3179 |  | 
 | 3180 | 		if (likely(bond_master)) { | 
 | 3181 | 			if (__skb_bond_should_drop(skb, bond_master)) { | 
 | 3182 | 				skb->deliver_no_wcard = 1; | 
 | 3183 | 				/* deliver only exact match */ | 
 | 3184 | 				null_or_orig = orig_dev; | 
 | 3185 | 			} else | 
 | 3186 | 				skb->dev = bond_master; | 
 | 3187 | 		} | 
| Joe Eykholt | cc9bd5c | 2008-07-02 18:22:00 -0700 | [diff] [blame] | 3188 | 	} | 
| Jay Vosburgh | 8f903c7 | 2006-02-21 16:36:44 -0800 | [diff] [blame] | 3189 |  | 
| Eric Dumazet | 27f39c73e | 2010-05-19 22:07:23 +0000 | [diff] [blame] | 3190 | 	__this_cpu_inc(softnet_data.processed); | 
| Arnaldo Carvalho de Melo | c1d2bbe | 2007-04-10 20:45:18 -0700 | [diff] [blame] | 3191 | 	skb_reset_network_header(skb); | 
| Arnaldo Carvalho de Melo | badff6d | 2007-03-13 13:06:52 -0300 | [diff] [blame] | 3192 | 	skb_reset_transport_header(skb); | 
| Arnaldo Carvalho de Melo | b0e380b | 2007-04-10 21:21:55 -0700 | [diff] [blame] | 3193 | 	skb->mac_len = skb->network_header - skb->mac_header; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3194 |  | 
 | 3195 | 	pt_prev = NULL; | 
 | 3196 |  | 
 | 3197 | 	rcu_read_lock(); | 
 | 3198 |  | 
 | 3199 | #ifdef CONFIG_NET_CLS_ACT | 
 | 3200 | 	if (skb->tc_verd & TC_NCLS) { | 
 | 3201 | 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); | 
 | 3202 | 		goto ncls; | 
 | 3203 | 	} | 
 | 3204 | #endif | 
 | 3205 |  | 
 | 3206 | 	list_for_each_entry_rcu(ptype, &ptype_all, list) { | 
| Joe Eykholt | f982307 | 2008-07-02 18:22:02 -0700 | [diff] [blame] | 3207 | 		if (ptype->dev == null_or_orig || ptype->dev == skb->dev || | 
 | 3208 | 		    ptype->dev == orig_dev) { | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 3209 | 			if (pt_prev) | 
| David S. Miller | f2ccd8f | 2005-08-09 19:34:12 -0700 | [diff] [blame] | 3210 | 				ret = deliver_skb(skb, pt_prev, orig_dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3211 | 			pt_prev = ptype; | 
 | 3212 | 		} | 
 | 3213 | 	} | 
 | 3214 |  | 
 | 3215 | #ifdef CONFIG_NET_CLS_ACT | 
| Herbert Xu | f697c3e | 2007-10-14 00:38:47 -0700 | [diff] [blame] | 3216 | 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev); | 
 | 3217 | 	if (!skb) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3218 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3219 | ncls: | 
 | 3220 | #endif | 
 | 3221 |  | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 3222 | 	/* Handle special case of bridge or macvlan */ | 
 | 3223 | 	rx_handler = rcu_dereference(skb->dev->rx_handler); | 
 | 3224 | 	if (rx_handler) { | 
 | 3225 | 		if (pt_prev) { | 
 | 3226 | 			ret = deliver_skb(skb, pt_prev, orig_dev); | 
 | 3227 | 			pt_prev = NULL; | 
 | 3228 | 		} | 
 | 3229 | 		skb = rx_handler(skb); | 
 | 3230 | 		if (!skb) | 
 | 3231 | 			goto out; | 
 | 3232 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3233 |  | 
| Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 3234 | 	if (vlan_tx_tag_present(skb)) { | 
 | 3235 | 		if (pt_prev) { | 
 | 3236 | 			ret = deliver_skb(skb, pt_prev, orig_dev); | 
 | 3237 | 			pt_prev = NULL; | 
 | 3238 | 		} | 
 | 3239 | 		if (vlan_hwaccel_do_receive(&skb)) { | 
 | 3240 | 			ret = __netif_receive_skb(skb); | 
 | 3241 | 			goto out; | 
 | 3242 | 		} else if (unlikely(!skb)) | 
 | 3243 | 			goto out; | 
 | 3244 | 	} | 
 | 3245 |  | 
| Andy Gospodarek | 1f3c880 | 2009-12-14 10:48:58 +0000 | [diff] [blame] | 3246 | 	/* | 
 | 3247 | 	 * Make sure frames received on VLAN interfaces stacked on | 
 | 3248 | 	 * bonding interfaces still make their way to any base bonding | 
 | 3249 | 	 * device that may have registered for a specific ptype.  The | 
 | 3250 | 	 * handler may have to adjust skb->dev and orig_dev. | 
| Andy Gospodarek | 1f3c880 | 2009-12-14 10:48:58 +0000 | [diff] [blame] | 3251 | 	 */ | 
| John Fastabend | 2df4a0f | 2010-05-12 21:31:11 +0000 | [diff] [blame] | 3252 | 	orig_or_bond = orig_dev; | 
| Andy Gospodarek | 1f3c880 | 2009-12-14 10:48:58 +0000 | [diff] [blame] | 3253 | 	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && | 
 | 3254 | 	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { | 
| John Fastabend | 2df4a0f | 2010-05-12 21:31:11 +0000 | [diff] [blame] | 3255 | 		orig_or_bond = vlan_dev_real_dev(skb->dev); | 
| Andy Gospodarek | 1f3c880 | 2009-12-14 10:48:58 +0000 | [diff] [blame] | 3256 | 	} | 
 | 3257 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3258 | 	type = skb->protocol; | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 3259 | 	list_for_each_entry_rcu(ptype, | 
 | 3260 | 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 
| Andy Gospodarek | 1f3c880 | 2009-12-14 10:48:58 +0000 | [diff] [blame] | 3261 | 		if (ptype->type == type && (ptype->dev == null_or_orig || | 
| Andy Gospodarek | ca8d9ea | 2010-01-06 12:56:37 +0000 | [diff] [blame] | 3262 | 		     ptype->dev == skb->dev || ptype->dev == orig_dev || | 
| John Fastabend | 2df4a0f | 2010-05-12 21:31:11 +0000 | [diff] [blame] | 3263 | 		     ptype->dev == orig_or_bond)) { | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 3264 | 			if (pt_prev) | 
| David S. Miller | f2ccd8f | 2005-08-09 19:34:12 -0700 | [diff] [blame] | 3265 | 				ret = deliver_skb(skb, pt_prev, orig_dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3266 | 			pt_prev = ptype; | 
 | 3267 | 		} | 
 | 3268 | 	} | 
 | 3269 |  | 
 | 3270 | 	if (pt_prev) { | 
| David S. Miller | f2ccd8f | 2005-08-09 19:34:12 -0700 | [diff] [blame] | 3271 | 		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3272 | 	} else { | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 3273 | 		atomic_long_inc(&skb->dev->rx_dropped); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3274 | 		kfree_skb(skb); | 
 | 3275 | 		/* Jamal, now you will not able to escape explaining | 
 | 3276 | 		 * me how you were going to use this. :-) | 
 | 3277 | 		 */ | 
 | 3278 | 		ret = NET_RX_DROP; | 
 | 3279 | 	} | 
 | 3280 |  | 
 | 3281 | out: | 
 | 3282 | 	rcu_read_unlock(); | 
 | 3283 | 	return ret; | 
 | 3284 | } | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 3285 |  | 
 | 3286 | /** | 
 | 3287 |  *	netif_receive_skb - process receive buffer from network | 
 | 3288 |  *	@skb: buffer to process | 
 | 3289 |  * | 
 | 3290 |  *	netif_receive_skb() is the main receive data processing function. | 
 | 3291 |  *	It always succeeds. The buffer may be dropped during processing | 
 | 3292 |  *	for congestion control or by the protocol layers. | 
 | 3293 |  * | 
 | 3294 |  *	This function may only be called from softirq context and interrupts | 
 | 3295 |  *	should be enabled. | 
 | 3296 |  * | 
 | 3297 |  *	Return values (usually ignored): | 
 | 3298 |  *	NET_RX_SUCCESS: no congestion | 
 | 3299 |  *	NET_RX_DROP: packet was dropped | 
 | 3300 |  */ | 
 | 3301 | int netif_receive_skb(struct sk_buff *skb) | 
 | 3302 | { | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 3303 | 	if (netdev_tstamp_prequeue) | 
 | 3304 | 		net_timestamp_check(skb); | 
 | 3305 |  | 
| Richard Cochran | c1f19b5 | 2010-07-17 08:49:36 +0000 | [diff] [blame] | 3306 | 	if (skb_defer_rx_timestamp(skb)) | 
 | 3307 | 		return NET_RX_SUCCESS; | 
 | 3308 |  | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 3309 | #ifdef CONFIG_RPS | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 3310 | 	{ | 
 | 3311 | 		struct rps_dev_flow voidflow, *rflow = &voidflow; | 
 | 3312 | 		int cpu, ret; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 3313 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 3314 | 		rcu_read_lock(); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 3315 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 3316 | 		cpu = get_rps_cpu(skb->dev, skb, &rflow); | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 3317 |  | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 3318 | 		if (cpu >= 0) { | 
 | 3319 | 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | 
 | 3320 | 			rcu_read_unlock(); | 
 | 3321 | 		} else { | 
 | 3322 | 			rcu_read_unlock(); | 
 | 3323 | 			ret = __netif_receive_skb(skb); | 
 | 3324 | 		} | 
 | 3325 |  | 
 | 3326 | 		return ret; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 3327 | 	} | 
| Tom Herbert | 1e94d72 | 2010-03-18 17:45:44 -0700 | [diff] [blame] | 3328 | #else | 
 | 3329 | 	return __netif_receive_skb(skb); | 
 | 3330 | #endif | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 3331 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 3332 | EXPORT_SYMBOL(netif_receive_skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3333 |  | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 3334 | /* Network device is going away, flush any packets still pending | 
 | 3335 |  * Called with irqs disabled. | 
 | 3336 |  */ | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 3337 | static void flush_backlog(void *arg) | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 3338 | { | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 3339 | 	struct net_device *dev = arg; | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 3340 | 	struct softnet_data *sd = &__get_cpu_var(softnet_data); | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 3341 | 	struct sk_buff *skb, *tmp; | 
 | 3342 |  | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 3343 | 	rps_lock(sd); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3344 | 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 3345 | 		if (skb->dev == dev) { | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 3346 | 			__skb_unlink(skb, &sd->input_pkt_queue); | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 3347 | 			kfree_skb(skb); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 3348 | 			input_queue_head_incr(sd); | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 3349 | 		} | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3350 | 	} | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 3351 | 	rps_unlock(sd); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3352 |  | 
 | 3353 | 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) { | 
 | 3354 | 		if (skb->dev == dev) { | 
 | 3355 | 			__skb_unlink(skb, &sd->process_queue); | 
 | 3356 | 			kfree_skb(skb); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 3357 | 			input_queue_head_incr(sd); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3358 | 		} | 
 | 3359 | 	} | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 3360 | } | 
 | 3361 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3362 | static int napi_gro_complete(struct sk_buff *skb) | 
 | 3363 | { | 
 | 3364 | 	struct packet_type *ptype; | 
 | 3365 | 	__be16 type = skb->protocol; | 
 | 3366 | 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 
 | 3367 | 	int err = -ENOENT; | 
 | 3368 |  | 
| Herbert Xu | fc59f9a | 2009-04-14 15:11:06 -0700 | [diff] [blame] | 3369 | 	if (NAPI_GRO_CB(skb)->count == 1) { | 
 | 3370 | 		skb_shinfo(skb)->gso_size = 0; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3371 | 		goto out; | 
| Herbert Xu | fc59f9a | 2009-04-14 15:11:06 -0700 | [diff] [blame] | 3372 | 	} | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3373 |  | 
 | 3374 | 	rcu_read_lock(); | 
 | 3375 | 	list_for_each_entry_rcu(ptype, head, list) { | 
 | 3376 | 		if (ptype->type != type || ptype->dev || !ptype->gro_complete) | 
 | 3377 | 			continue; | 
 | 3378 |  | 
 | 3379 | 		err = ptype->gro_complete(skb); | 
 | 3380 | 		break; | 
 | 3381 | 	} | 
 | 3382 | 	rcu_read_unlock(); | 
 | 3383 |  | 
 | 3384 | 	if (err) { | 
 | 3385 | 		WARN_ON(&ptype->list == head); | 
 | 3386 | 		kfree_skb(skb); | 
 | 3387 | 		return NET_RX_SUCCESS; | 
 | 3388 | 	} | 
 | 3389 |  | 
 | 3390 | out: | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3391 | 	return netif_receive_skb(skb); | 
 | 3392 | } | 
 | 3393 |  | 
| Eric Dumazet | 86cac58 | 2010-08-31 18:25:32 +0000 | [diff] [blame] | 3394 | inline void napi_gro_flush(struct napi_struct *napi) | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3395 | { | 
 | 3396 | 	struct sk_buff *skb, *next; | 
 | 3397 |  | 
 | 3398 | 	for (skb = napi->gro_list; skb; skb = next) { | 
 | 3399 | 		next = skb->next; | 
 | 3400 | 		skb->next = NULL; | 
 | 3401 | 		napi_gro_complete(skb); | 
 | 3402 | 	} | 
 | 3403 |  | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 3404 | 	napi->gro_count = 0; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3405 | 	napi->gro_list = NULL; | 
 | 3406 | } | 
| Eric Dumazet | 86cac58 | 2010-08-31 18:25:32 +0000 | [diff] [blame] | 3407 | EXPORT_SYMBOL(napi_gro_flush); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3408 |  | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 3409 | enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3410 | { | 
 | 3411 | 	struct sk_buff **pp = NULL; | 
 | 3412 | 	struct packet_type *ptype; | 
 | 3413 | 	__be16 type = skb->protocol; | 
 | 3414 | 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 
| Herbert Xu | 0da2afd5 | 2008-12-26 14:57:42 -0800 | [diff] [blame] | 3415 | 	int same_flow; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3416 | 	int mac_len; | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 3417 | 	enum gro_result ret; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3418 |  | 
| Jarek Poplawski | ce9e76c | 2010-08-05 01:19:11 +0000 | [diff] [blame] | 3419 | 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3420 | 		goto normal; | 
 | 3421 |  | 
| David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3422 | 	if (skb_is_gso(skb) || skb_has_frag_list(skb)) | 
| Herbert Xu | f17f5c9 | 2009-01-14 14:36:12 -0800 | [diff] [blame] | 3423 | 		goto normal; | 
 | 3424 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3425 | 	rcu_read_lock(); | 
 | 3426 | 	list_for_each_entry_rcu(ptype, head, list) { | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3427 | 		if (ptype->type != type || ptype->dev || !ptype->gro_receive) | 
 | 3428 | 			continue; | 
 | 3429 |  | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 3430 | 		skb_set_network_header(skb, skb_gro_offset(skb)); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3431 | 		mac_len = skb->network_header - skb->mac_header; | 
 | 3432 | 		skb->mac_len = mac_len; | 
 | 3433 | 		NAPI_GRO_CB(skb)->same_flow = 0; | 
 | 3434 | 		NAPI_GRO_CB(skb)->flush = 0; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3435 | 		NAPI_GRO_CB(skb)->free = 0; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3436 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3437 | 		pp = ptype->gro_receive(&napi->gro_list, skb); | 
 | 3438 | 		break; | 
 | 3439 | 	} | 
 | 3440 | 	rcu_read_unlock(); | 
 | 3441 |  | 
 | 3442 | 	if (&ptype->list == head) | 
 | 3443 | 		goto normal; | 
 | 3444 |  | 
| Herbert Xu | 0da2afd5 | 2008-12-26 14:57:42 -0800 | [diff] [blame] | 3445 | 	same_flow = NAPI_GRO_CB(skb)->same_flow; | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3446 | 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED; | 
| Herbert Xu | 0da2afd5 | 2008-12-26 14:57:42 -0800 | [diff] [blame] | 3447 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3448 | 	if (pp) { | 
 | 3449 | 		struct sk_buff *nskb = *pp; | 
 | 3450 |  | 
 | 3451 | 		*pp = nskb->next; | 
 | 3452 | 		nskb->next = NULL; | 
 | 3453 | 		napi_gro_complete(nskb); | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 3454 | 		napi->gro_count--; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3455 | 	} | 
 | 3456 |  | 
| Herbert Xu | 0da2afd5 | 2008-12-26 14:57:42 -0800 | [diff] [blame] | 3457 | 	if (same_flow) | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3458 | 		goto ok; | 
 | 3459 |  | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 3460 | 	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS) | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3461 | 		goto normal; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3462 |  | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 3463 | 	napi->gro_count++; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3464 | 	NAPI_GRO_CB(skb)->count = 1; | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 3465 | 	skb_shinfo(skb)->gso_size = skb_gro_len(skb); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3466 | 	skb->next = napi->gro_list; | 
 | 3467 | 	napi->gro_list = skb; | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3468 | 	ret = GRO_HELD; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3469 |  | 
| Herbert Xu | ad0f990 | 2009-02-01 01:24:55 -0800 | [diff] [blame] | 3470 | pull: | 
| Herbert Xu | cb18978 | 2009-05-26 18:50:31 +0000 | [diff] [blame] | 3471 | 	if (skb_headlen(skb) < skb_gro_offset(skb)) { | 
 | 3472 | 		int grow = skb_gro_offset(skb) - skb_headlen(skb); | 
 | 3473 |  | 
 | 3474 | 		BUG_ON(skb->end - skb->tail < grow); | 
 | 3475 |  | 
 | 3476 | 		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow); | 
 | 3477 |  | 
 | 3478 | 		skb->tail += grow; | 
 | 3479 | 		skb->data_len -= grow; | 
 | 3480 |  | 
 | 3481 | 		skb_shinfo(skb)->frags[0].page_offset += grow; | 
 | 3482 | 		skb_shinfo(skb)->frags[0].size -= grow; | 
 | 3483 |  | 
 | 3484 | 		if (unlikely(!skb_shinfo(skb)->frags[0].size)) { | 
 | 3485 | 			put_page(skb_shinfo(skb)->frags[0].page); | 
 | 3486 | 			memmove(skb_shinfo(skb)->frags, | 
 | 3487 | 				skb_shinfo(skb)->frags + 1, | 
| Jarek Poplawski | e5093ae | 2010-08-11 02:02:10 +0000 | [diff] [blame] | 3488 | 				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); | 
| Herbert Xu | cb18978 | 2009-05-26 18:50:31 +0000 | [diff] [blame] | 3489 | 		} | 
| Herbert Xu | ad0f990 | 2009-02-01 01:24:55 -0800 | [diff] [blame] | 3490 | 	} | 
 | 3491 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3492 | ok: | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3493 | 	return ret; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3494 |  | 
 | 3495 | normal: | 
| Herbert Xu | ad0f990 | 2009-02-01 01:24:55 -0800 | [diff] [blame] | 3496 | 	ret = GRO_NORMAL; | 
 | 3497 | 	goto pull; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3498 | } | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3499 | EXPORT_SYMBOL(dev_gro_receive); | 
 | 3500 |  | 
| Eric Dumazet | 40d0802 | 2010-08-26 22:03:08 -0700 | [diff] [blame] | 3501 | static inline gro_result_t | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 3502 | __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3503 | { | 
 | 3504 | 	struct sk_buff *p; | 
 | 3505 |  | 
 | 3506 | 	for (p = napi->gro_list; p; p = p->next) { | 
| Eric Dumazet | 40d0802 | 2010-08-26 22:03:08 -0700 | [diff] [blame] | 3507 | 		unsigned long diffs; | 
 | 3508 |  | 
 | 3509 | 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; | 
| Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 3510 | 		diffs |= p->vlan_tci ^ skb->vlan_tci; | 
| Eric Dumazet | 40d0802 | 2010-08-26 22:03:08 -0700 | [diff] [blame] | 3511 | 		diffs |= compare_ether_header(skb_mac_header(p), | 
| Joe Perches | f64f9e7 | 2009-11-29 16:55:45 -0800 | [diff] [blame] | 3512 | 					      skb_gro_mac_header(skb)); | 
| Eric Dumazet | 40d0802 | 2010-08-26 22:03:08 -0700 | [diff] [blame] | 3513 | 		NAPI_GRO_CB(p)->same_flow = !diffs; | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3514 | 		NAPI_GRO_CB(p)->flush = 0; | 
 | 3515 | 	} | 
 | 3516 |  | 
 | 3517 | 	return dev_gro_receive(napi, skb); | 
 | 3518 | } | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3519 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3520 | gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3521 | { | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3522 | 	switch (ret) { | 
 | 3523 | 	case GRO_NORMAL: | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3524 | 		if (netif_receive_skb(skb)) | 
 | 3525 | 			ret = GRO_DROP; | 
 | 3526 | 		break; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3527 |  | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3528 | 	case GRO_DROP: | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3529 | 	case GRO_MERGED_FREE: | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3530 | 		kfree_skb(skb); | 
 | 3531 | 		break; | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 3532 |  | 
 | 3533 | 	case GRO_HELD: | 
 | 3534 | 	case GRO_MERGED: | 
 | 3535 | 		break; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3536 | 	} | 
 | 3537 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3538 | 	return ret; | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3539 | } | 
 | 3540 | EXPORT_SYMBOL(napi_skb_finish); | 
 | 3541 |  | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 3542 | void skb_gro_reset_offset(struct sk_buff *skb) | 
 | 3543 | { | 
 | 3544 | 	NAPI_GRO_CB(skb)->data_offset = 0; | 
 | 3545 | 	NAPI_GRO_CB(skb)->frag0 = NULL; | 
| Herbert Xu | 7489594 | 2009-05-26 18:50:27 +0000 | [diff] [blame] | 3546 | 	NAPI_GRO_CB(skb)->frag0_len = 0; | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 3547 |  | 
| Herbert Xu | 78d3fd0 | 2009-05-26 18:50:23 +0000 | [diff] [blame] | 3548 | 	if (skb->mac_header == skb->tail && | 
| Herbert Xu | 7489594 | 2009-05-26 18:50:27 +0000 | [diff] [blame] | 3549 | 	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) { | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 3550 | 		NAPI_GRO_CB(skb)->frag0 = | 
 | 3551 | 			page_address(skb_shinfo(skb)->frags[0].page) + | 
 | 3552 | 			skb_shinfo(skb)->frags[0].page_offset; | 
| Herbert Xu | 7489594 | 2009-05-26 18:50:27 +0000 | [diff] [blame] | 3553 | 		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size; | 
 | 3554 | 	} | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 3555 | } | 
 | 3556 | EXPORT_SYMBOL(skb_gro_reset_offset); | 
 | 3557 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3558 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3559 | { | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 3560 | 	skb_gro_reset_offset(skb); | 
 | 3561 |  | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3562 | 	return napi_skb_finish(__napi_gro_receive(napi, skb), skb); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3563 | } | 
 | 3564 | EXPORT_SYMBOL(napi_gro_receive); | 
 | 3565 |  | 
| stephen hemminger | d0c2b0d | 2010-10-19 07:12:10 +0000 | [diff] [blame] | 3566 | static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3567 | { | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3568 | 	__skb_pull(skb, skb_headlen(skb)); | 
 | 3569 | 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); | 
| Jesse Gross | 3701e51 | 2010-10-20 13:56:06 +0000 | [diff] [blame] | 3570 | 	skb->vlan_tci = 0; | 
| Herbert Xu | 66c46d7 | 2011-01-29 20:44:54 -0800 | [diff] [blame] | 3571 | 	skb->dev = napi->dev; | 
| Andy Gospodarek | 6d152e2 | 2011-02-02 14:53:25 -0800 | [diff] [blame] | 3572 | 	skb->skb_iif = 0; | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3573 |  | 
 | 3574 | 	napi->skb = skb; | 
 | 3575 | } | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3576 |  | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3577 | struct sk_buff *napi_get_frags(struct napi_struct *napi) | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3578 | { | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3579 | 	struct sk_buff *skb = napi->skb; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3580 |  | 
 | 3581 | 	if (!skb) { | 
| Eric Dumazet | 89d71a6 | 2009-10-13 05:34:20 +0000 | [diff] [blame] | 3582 | 		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); | 
 | 3583 | 		if (skb) | 
 | 3584 | 			napi->skb = skb; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3585 | 	} | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3586 | 	return skb; | 
 | 3587 | } | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3588 | EXPORT_SYMBOL(napi_get_frags); | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3589 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3590 | gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, | 
 | 3591 | 			       gro_result_t ret) | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3592 | { | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3593 | 	switch (ret) { | 
 | 3594 | 	case GRO_NORMAL: | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 3595 | 	case GRO_HELD: | 
| Ajit Khaparde | e76b69c | 2010-02-16 20:25:43 +0000 | [diff] [blame] | 3596 | 		skb->protocol = eth_type_trans(skb, skb->dev); | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 3597 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3598 | 		if (ret == GRO_HELD) | 
 | 3599 | 			skb_gro_pull(skb, -ETH_HLEN); | 
 | 3600 | 		else if (netif_receive_skb(skb)) | 
 | 3601 | 			ret = GRO_DROP; | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 3602 | 		break; | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3603 |  | 
 | 3604 | 	case GRO_DROP: | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3605 | 	case GRO_MERGED_FREE: | 
 | 3606 | 		napi_reuse_skb(napi, skb); | 
 | 3607 | 		break; | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 3608 |  | 
 | 3609 | 	case GRO_MERGED: | 
 | 3610 | 		break; | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3611 | 	} | 
 | 3612 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3613 | 	return ret; | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3614 | } | 
 | 3615 | EXPORT_SYMBOL(napi_frags_finish); | 
 | 3616 |  | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3617 | struct sk_buff *napi_frags_skb(struct napi_struct *napi) | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3618 | { | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3619 | 	struct sk_buff *skb = napi->skb; | 
 | 3620 | 	struct ethhdr *eth; | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 3621 | 	unsigned int hlen; | 
 | 3622 | 	unsigned int off; | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3623 |  | 
 | 3624 | 	napi->skb = NULL; | 
 | 3625 |  | 
 | 3626 | 	skb_reset_mac_header(skb); | 
 | 3627 | 	skb_gro_reset_offset(skb); | 
 | 3628 |  | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 3629 | 	off = skb_gro_offset(skb); | 
 | 3630 | 	hlen = off + sizeof(*eth); | 
 | 3631 | 	eth = skb_gro_header_fast(skb, off); | 
 | 3632 | 	if (skb_gro_header_hard(skb, hlen)) { | 
 | 3633 | 		eth = skb_gro_header_slow(skb, hlen, off); | 
 | 3634 | 		if (unlikely(!eth)) { | 
 | 3635 | 			napi_reuse_skb(napi, skb); | 
 | 3636 | 			skb = NULL; | 
 | 3637 | 			goto out; | 
 | 3638 | 		} | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3639 | 	} | 
 | 3640 |  | 
 | 3641 | 	skb_gro_pull(skb, sizeof(*eth)); | 
 | 3642 |  | 
 | 3643 | 	/* | 
 | 3644 | 	 * This works because the only protocols we care about don't require | 
 | 3645 | 	 * special handling.  We'll fix it up properly at the end. | 
 | 3646 | 	 */ | 
 | 3647 | 	skb->protocol = eth->h_proto; | 
 | 3648 |  | 
 | 3649 | out: | 
 | 3650 | 	return skb; | 
 | 3651 | } | 
 | 3652 | EXPORT_SYMBOL(napi_frags_skb); | 
 | 3653 |  | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3654 | gro_result_t napi_gro_frags(struct napi_struct *napi) | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3655 | { | 
 | 3656 | 	struct sk_buff *skb = napi_frags_skb(napi); | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3657 |  | 
 | 3658 | 	if (!skb) | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 3659 | 		return GRO_DROP; | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 3660 |  | 
| Herbert Xu | 5d0d9be | 2009-01-29 14:19:48 +0000 | [diff] [blame] | 3661 | 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3662 | } | 
 | 3663 | EXPORT_SYMBOL(napi_gro_frags); | 
 | 3664 |  | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3665 | /* | 
 | 3666 |  * net_rps_action sends any pending IPI's for rps. | 
 | 3667 |  * Note: called with local irq disabled, but exits with local irq enabled. | 
 | 3668 |  */ | 
 | 3669 | static void net_rps_action_and_irq_enable(struct softnet_data *sd) | 
 | 3670 | { | 
 | 3671 | #ifdef CONFIG_RPS | 
 | 3672 | 	struct softnet_data *remsd = sd->rps_ipi_list; | 
 | 3673 |  | 
 | 3674 | 	if (remsd) { | 
 | 3675 | 		sd->rps_ipi_list = NULL; | 
 | 3676 |  | 
 | 3677 | 		local_irq_enable(); | 
 | 3678 |  | 
 | 3679 | 		/* Send pending IPI's to kick RPS processing on remote cpus. */ | 
 | 3680 | 		while (remsd) { | 
 | 3681 | 			struct softnet_data *next = remsd->rps_ipi_next; | 
 | 3682 |  | 
 | 3683 | 			if (cpu_online(remsd->cpu)) | 
 | 3684 | 				__smp_call_function_single(remsd->cpu, | 
 | 3685 | 							   &remsd->csd, 0); | 
 | 3686 | 			remsd = next; | 
 | 3687 | 		} | 
 | 3688 | 	} else | 
 | 3689 | #endif | 
 | 3690 | 		local_irq_enable(); | 
 | 3691 | } | 
 | 3692 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3693 | static int process_backlog(struct napi_struct *napi, int quota) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3694 | { | 
 | 3695 | 	int work = 0; | 
| Eric Dumazet | eecfd7c | 2010-05-06 22:07:48 -0700 | [diff] [blame] | 3696 | 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3697 |  | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3698 | #ifdef CONFIG_RPS | 
 | 3699 | 	/* Check if we have pending ipi, its better to send them now, | 
 | 3700 | 	 * not waiting net_rx_action() end. | 
 | 3701 | 	 */ | 
 | 3702 | 	if (sd->rps_ipi_list) { | 
 | 3703 | 		local_irq_disable(); | 
 | 3704 | 		net_rps_action_and_irq_enable(sd); | 
 | 3705 | 	} | 
 | 3706 | #endif | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3707 | 	napi->weight = weight_p; | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3708 | 	local_irq_disable(); | 
 | 3709 | 	while (work < quota) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3710 | 		struct sk_buff *skb; | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3711 | 		unsigned int qlen; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3712 |  | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3713 | 		while ((skb = __skb_dequeue(&sd->process_queue))) { | 
| Eric Dumazet | e400827 | 2010-04-05 15:42:39 -0700 | [diff] [blame] | 3714 | 			local_irq_enable(); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3715 | 			__netif_receive_skb(skb); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3716 | 			local_irq_disable(); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 3717 | 			input_queue_head_incr(sd); | 
 | 3718 | 			if (++work >= quota) { | 
 | 3719 | 				local_irq_enable(); | 
 | 3720 | 				return work; | 
 | 3721 | 			} | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3722 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3723 |  | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3724 | 		rps_lock(sd); | 
 | 3725 | 		qlen = skb_queue_len(&sd->input_pkt_queue); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 3726 | 		if (qlen) | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3727 | 			skb_queue_splice_tail_init(&sd->input_pkt_queue, | 
 | 3728 | 						   &sd->process_queue); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 3729 |  | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3730 | 		if (qlen < quota - work) { | 
| Eric Dumazet | eecfd7c | 2010-05-06 22:07:48 -0700 | [diff] [blame] | 3731 | 			/* | 
 | 3732 | 			 * Inline a custom version of __napi_complete(). | 
 | 3733 | 			 * only current cpu owns and manipulates this napi, | 
 | 3734 | 			 * and NAPI_STATE_SCHED is the only possible flag set on backlog. | 
 | 3735 | 			 * we can use a plain write instead of clear_bit(), | 
 | 3736 | 			 * and we dont need an smp_mb() memory barrier. | 
 | 3737 | 			 */ | 
 | 3738 | 			list_del(&napi->poll_list); | 
 | 3739 | 			napi->state = 0; | 
 | 3740 |  | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 3741 | 			quota = work + qlen; | 
 | 3742 | 		} | 
 | 3743 | 		rps_unlock(sd); | 
 | 3744 | 	} | 
 | 3745 | 	local_irq_enable(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3746 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3747 | 	return work; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3748 | } | 
 | 3749 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3750 | /** | 
 | 3751 |  * __napi_schedule - schedule for receive | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 3752 |  * @n: entry to schedule | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3753 |  * | 
 | 3754 |  * The entry's receive function will be scheduled to run | 
 | 3755 |  */ | 
| Harvey Harrison | b5606c2 | 2008-02-13 15:03:16 -0800 | [diff] [blame] | 3756 | void __napi_schedule(struct napi_struct *n) | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3757 | { | 
 | 3758 | 	unsigned long flags; | 
 | 3759 |  | 
 | 3760 | 	local_irq_save(flags); | 
| Eric Dumazet | eecfd7c | 2010-05-06 22:07:48 -0700 | [diff] [blame] | 3761 | 	____napi_schedule(&__get_cpu_var(softnet_data), n); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3762 | 	local_irq_restore(flags); | 
 | 3763 | } | 
 | 3764 | EXPORT_SYMBOL(__napi_schedule); | 
 | 3765 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3766 | void __napi_complete(struct napi_struct *n) | 
 | 3767 | { | 
 | 3768 | 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | 
 | 3769 | 	BUG_ON(n->gro_list); | 
 | 3770 |  | 
 | 3771 | 	list_del(&n->poll_list); | 
 | 3772 | 	smp_mb__before_clear_bit(); | 
 | 3773 | 	clear_bit(NAPI_STATE_SCHED, &n->state); | 
 | 3774 | } | 
 | 3775 | EXPORT_SYMBOL(__napi_complete); | 
 | 3776 |  | 
 | 3777 | void napi_complete(struct napi_struct *n) | 
 | 3778 | { | 
 | 3779 | 	unsigned long flags; | 
 | 3780 |  | 
 | 3781 | 	/* | 
 | 3782 | 	 * don't let napi dequeue from the cpu poll list | 
 | 3783 | 	 * just in case its running on a different cpu | 
 | 3784 | 	 */ | 
 | 3785 | 	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) | 
 | 3786 | 		return; | 
 | 3787 |  | 
 | 3788 | 	napi_gro_flush(n); | 
 | 3789 | 	local_irq_save(flags); | 
 | 3790 | 	__napi_complete(n); | 
 | 3791 | 	local_irq_restore(flags); | 
 | 3792 | } | 
 | 3793 | EXPORT_SYMBOL(napi_complete); | 
 | 3794 |  | 
 | 3795 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | 
 | 3796 | 		    int (*poll)(struct napi_struct *, int), int weight) | 
 | 3797 | { | 
 | 3798 | 	INIT_LIST_HEAD(&napi->poll_list); | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 3799 | 	napi->gro_count = 0; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3800 | 	napi->gro_list = NULL; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3801 | 	napi->skb = NULL; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3802 | 	napi->poll = poll; | 
 | 3803 | 	napi->weight = weight; | 
 | 3804 | 	list_add(&napi->dev_list, &dev->napi_list); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3805 | 	napi->dev = dev; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3806 | #ifdef CONFIG_NETPOLL | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3807 | 	spin_lock_init(&napi->poll_lock); | 
 | 3808 | 	napi->poll_owner = -1; | 
 | 3809 | #endif | 
 | 3810 | 	set_bit(NAPI_STATE_SCHED, &napi->state); | 
 | 3811 | } | 
 | 3812 | EXPORT_SYMBOL(netif_napi_add); | 
 | 3813 |  | 
 | 3814 | void netif_napi_del(struct napi_struct *napi) | 
 | 3815 | { | 
 | 3816 | 	struct sk_buff *skb, *next; | 
 | 3817 |  | 
| Peter P Waskiewicz Jr | d7b0663 | 2008-12-26 01:35:35 -0800 | [diff] [blame] | 3818 | 	list_del_init(&napi->dev_list); | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 3819 | 	napi_free_frags(napi); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3820 |  | 
 | 3821 | 	for (skb = napi->gro_list; skb; skb = next) { | 
 | 3822 | 		next = skb->next; | 
 | 3823 | 		skb->next = NULL; | 
 | 3824 | 		kfree_skb(skb); | 
 | 3825 | 	} | 
 | 3826 |  | 
 | 3827 | 	napi->gro_list = NULL; | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 3828 | 	napi->gro_count = 0; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 3829 | } | 
 | 3830 | EXPORT_SYMBOL(netif_napi_del); | 
 | 3831 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3832 | static void net_rx_action(struct softirq_action *h) | 
 | 3833 | { | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3834 | 	struct softnet_data *sd = &__get_cpu_var(softnet_data); | 
| Stephen Hemminger | 24f8b23 | 2008-11-03 17:14:38 -0800 | [diff] [blame] | 3835 | 	unsigned long time_limit = jiffies + 2; | 
| Stephen Hemminger | 51b0bde | 2005-06-23 20:14:40 -0700 | [diff] [blame] | 3836 | 	int budget = netdev_budget; | 
| Matt Mackall | 53fb95d | 2005-08-11 19:27:43 -0700 | [diff] [blame] | 3837 | 	void *have; | 
 | 3838 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3839 | 	local_irq_disable(); | 
 | 3840 |  | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3841 | 	while (!list_empty(&sd->poll_list)) { | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3842 | 		struct napi_struct *n; | 
 | 3843 | 		int work, weight; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3844 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3845 | 		/* If softirq window is exhuasted then punt. | 
| Stephen Hemminger | 24f8b23 | 2008-11-03 17:14:38 -0800 | [diff] [blame] | 3846 | 		 * Allow this to run for 2 jiffies since which will allow | 
 | 3847 | 		 * an average latency of 1.5/HZ. | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3848 | 		 */ | 
| Stephen Hemminger | 24f8b23 | 2008-11-03 17:14:38 -0800 | [diff] [blame] | 3849 | 		if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3850 | 			goto softnet_break; | 
 | 3851 |  | 
 | 3852 | 		local_irq_enable(); | 
 | 3853 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3854 | 		/* Even though interrupts have been re-enabled, this | 
 | 3855 | 		 * access is safe because interrupts can only add new | 
 | 3856 | 		 * entries to the tail of this list, and only ->poll() | 
 | 3857 | 		 * calls can remove this head entry from the list. | 
 | 3858 | 		 */ | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3859 | 		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3860 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3861 | 		have = netpoll_poll_lock(n); | 
 | 3862 |  | 
 | 3863 | 		weight = n->weight; | 
 | 3864 |  | 
| David S. Miller | 0a7606c | 2007-10-29 21:28:47 -0700 | [diff] [blame] | 3865 | 		/* This NAPI_STATE_SCHED test is for avoiding a race | 
 | 3866 | 		 * with netpoll's poll_napi().  Only the entity which | 
 | 3867 | 		 * obtains the lock and sees NAPI_STATE_SCHED set will | 
 | 3868 | 		 * actually make the ->poll() call.  Therefore we avoid | 
 | 3869 | 		 * accidently calling ->poll() when NAPI is not scheduled. | 
 | 3870 | 		 */ | 
 | 3871 | 		work = 0; | 
| Neil Horman | 4ea7e38 | 2009-05-21 07:36:08 +0000 | [diff] [blame] | 3872 | 		if (test_bit(NAPI_STATE_SCHED, &n->state)) { | 
| David S. Miller | 0a7606c | 2007-10-29 21:28:47 -0700 | [diff] [blame] | 3873 | 			work = n->poll(n, weight); | 
| Neil Horman | 4ea7e38 | 2009-05-21 07:36:08 +0000 | [diff] [blame] | 3874 | 			trace_napi_poll(n); | 
 | 3875 | 		} | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3876 |  | 
 | 3877 | 		WARN_ON_ONCE(work > weight); | 
 | 3878 |  | 
 | 3879 | 		budget -= work; | 
 | 3880 |  | 
 | 3881 | 		local_irq_disable(); | 
 | 3882 |  | 
 | 3883 | 		/* Drivers must not modify the NAPI state if they | 
 | 3884 | 		 * consume the entire weight.  In such cases this code | 
 | 3885 | 		 * still "owns" the NAPI instance and therefore can | 
 | 3886 | 		 * move the instance around on the list at-will. | 
 | 3887 | 		 */ | 
| David S. Miller | fed17f3 | 2008-01-07 21:00:40 -0800 | [diff] [blame] | 3888 | 		if (unlikely(work == weight)) { | 
| Herbert Xu | ff780cd | 2009-06-26 19:27:04 -0700 | [diff] [blame] | 3889 | 			if (unlikely(napi_disable_pending(n))) { | 
 | 3890 | 				local_irq_enable(); | 
 | 3891 | 				napi_complete(n); | 
 | 3892 | 				local_irq_disable(); | 
 | 3893 | 			} else | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3894 | 				list_move_tail(&n->poll_list, &sd->poll_list); | 
| David S. Miller | fed17f3 | 2008-01-07 21:00:40 -0800 | [diff] [blame] | 3895 | 		} | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3896 |  | 
 | 3897 | 		netpoll_poll_unlock(have); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3898 | 	} | 
 | 3899 | out: | 
| Eric Dumazet | e326bed | 2010-04-22 00:22:45 -0700 | [diff] [blame] | 3900 | 	net_rps_action_and_irq_enable(sd); | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 3901 |  | 
| Chris Leech | db21733 | 2006-06-17 21:24:58 -0700 | [diff] [blame] | 3902 | #ifdef CONFIG_NET_DMA | 
 | 3903 | 	/* | 
 | 3904 | 	 * There may not be any more sk_buffs coming right now, so push | 
 | 3905 | 	 * any pending DMA copies to hardware | 
 | 3906 | 	 */ | 
| Dan Williams | 2ba0562 | 2009-01-06 11:38:14 -0700 | [diff] [blame] | 3907 | 	dma_issue_pending_all(); | 
| Chris Leech | db21733 | 2006-06-17 21:24:58 -0700 | [diff] [blame] | 3908 | #endif | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 3909 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3910 | 	return; | 
 | 3911 |  | 
 | 3912 | softnet_break: | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 3913 | 	sd->time_squeeze++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3914 | 	__raise_softirq_irqoff(NET_RX_SOFTIRQ); | 
 | 3915 | 	goto out; | 
 | 3916 | } | 
 | 3917 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 3918 | static gifconf_func_t *gifconf_list[NPROTO]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3919 |  | 
 | 3920 | /** | 
 | 3921 |  *	register_gifconf	-	register a SIOCGIF handler | 
 | 3922 |  *	@family: Address family | 
 | 3923 |  *	@gifconf: Function handler | 
 | 3924 |  * | 
 | 3925 |  *	Register protocol dependent address dumping routines. The handler | 
 | 3926 |  *	that is passed must not be freed or reused until it has been replaced | 
 | 3927 |  *	by another handler. | 
 | 3928 |  */ | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 3929 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3930 | { | 
 | 3931 | 	if (family >= NPROTO) | 
 | 3932 | 		return -EINVAL; | 
 | 3933 | 	gifconf_list[family] = gifconf; | 
 | 3934 | 	return 0; | 
 | 3935 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 3936 | EXPORT_SYMBOL(register_gifconf); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3937 |  | 
 | 3938 |  | 
 | 3939 | /* | 
 | 3940 |  *	Map an interface index to its name (SIOCGIFNAME) | 
 | 3941 |  */ | 
 | 3942 |  | 
 | 3943 | /* | 
 | 3944 |  *	We need this ioctl for efficient implementation of the | 
 | 3945 |  *	if_indextoname() function required by the IPv6 API.  Without | 
 | 3946 |  *	it, we would have to search all the interfaces to find a | 
 | 3947 |  *	match.  --pb | 
 | 3948 |  */ | 
 | 3949 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 3950 | static int dev_ifname(struct net *net, struct ifreq __user *arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3951 | { | 
 | 3952 | 	struct net_device *dev; | 
 | 3953 | 	struct ifreq ifr; | 
 | 3954 |  | 
 | 3955 | 	/* | 
 | 3956 | 	 *	Fetch the caller's info block. | 
 | 3957 | 	 */ | 
 | 3958 |  | 
 | 3959 | 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | 
 | 3960 | 		return -EFAULT; | 
 | 3961 |  | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 3962 | 	rcu_read_lock(); | 
 | 3963 | 	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3964 | 	if (!dev) { | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 3965 | 		rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3966 | 		return -ENODEV; | 
 | 3967 | 	} | 
 | 3968 |  | 
 | 3969 | 	strcpy(ifr.ifr_name, dev->name); | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 3970 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3971 |  | 
 | 3972 | 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) | 
 | 3973 | 		return -EFAULT; | 
 | 3974 | 	return 0; | 
 | 3975 | } | 
 | 3976 |  | 
 | 3977 | /* | 
 | 3978 |  *	Perform a SIOCGIFCONF call. This structure will change | 
 | 3979 |  *	size eventually, and there is nothing I can do about it. | 
 | 3980 |  *	Thus we will need a 'compatibility mode'. | 
 | 3981 |  */ | 
 | 3982 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 3983 | static int dev_ifconf(struct net *net, char __user *arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3984 | { | 
 | 3985 | 	struct ifconf ifc; | 
 | 3986 | 	struct net_device *dev; | 
 | 3987 | 	char __user *pos; | 
 | 3988 | 	int len; | 
 | 3989 | 	int total; | 
 | 3990 | 	int i; | 
 | 3991 |  | 
 | 3992 | 	/* | 
 | 3993 | 	 *	Fetch the caller's info block. | 
 | 3994 | 	 */ | 
 | 3995 |  | 
 | 3996 | 	if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) | 
 | 3997 | 		return -EFAULT; | 
 | 3998 |  | 
 | 3999 | 	pos = ifc.ifc_buf; | 
 | 4000 | 	len = ifc.ifc_len; | 
 | 4001 |  | 
 | 4002 | 	/* | 
 | 4003 | 	 *	Loop over the interfaces, and write an info block for each. | 
 | 4004 | 	 */ | 
 | 4005 |  | 
 | 4006 | 	total = 0; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4007 | 	for_each_netdev(net, dev) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4008 | 		for (i = 0; i < NPROTO; i++) { | 
 | 4009 | 			if (gifconf_list[i]) { | 
 | 4010 | 				int done; | 
 | 4011 | 				if (!pos) | 
 | 4012 | 					done = gifconf_list[i](dev, NULL, 0); | 
 | 4013 | 				else | 
 | 4014 | 					done = gifconf_list[i](dev, pos + total, | 
 | 4015 | 							       len - total); | 
 | 4016 | 				if (done < 0) | 
 | 4017 | 					return -EFAULT; | 
 | 4018 | 				total += done; | 
 | 4019 | 			} | 
 | 4020 | 		} | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 4021 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4022 |  | 
 | 4023 | 	/* | 
 | 4024 | 	 *	All done.  Write the updated control block back to the caller. | 
 | 4025 | 	 */ | 
 | 4026 | 	ifc.ifc_len = total; | 
 | 4027 |  | 
 | 4028 | 	/* | 
 | 4029 | 	 * 	Both BSD and Solaris return 0 here, so we do too. | 
 | 4030 | 	 */ | 
 | 4031 | 	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0; | 
 | 4032 | } | 
 | 4033 |  | 
 | 4034 | #ifdef CONFIG_PROC_FS | 
 | 4035 | /* | 
 | 4036 |  *	This is invoked by the /proc filesystem handler to display a device | 
 | 4037 |  *	in detail. | 
 | 4038 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4039 | void *dev_seq_start(struct seq_file *seq, loff_t *pos) | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 4040 | 	__acquires(RCU) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4041 | { | 
| Denis V. Lunev | e372c41 | 2007-11-19 22:31:54 -0800 | [diff] [blame] | 4042 | 	struct net *net = seq_file_net(seq); | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 4043 | 	loff_t off; | 
 | 4044 | 	struct net_device *dev; | 
 | 4045 |  | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 4046 | 	rcu_read_lock(); | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 4047 | 	if (!*pos) | 
 | 4048 | 		return SEQ_START_TOKEN; | 
 | 4049 |  | 
 | 4050 | 	off = 1; | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 4051 | 	for_each_netdev_rcu(net, dev) | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 4052 | 		if (off++ == *pos) | 
 | 4053 | 			return dev; | 
 | 4054 |  | 
 | 4055 | 	return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4056 | } | 
 | 4057 |  | 
 | 4058 | void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 
 | 4059 | { | 
| Eric Dumazet | ccf4343 | 2011-01-26 18:08:02 +0000 | [diff] [blame] | 4060 | 	struct net_device *dev = v; | 
 | 4061 |  | 
 | 4062 | 	if (v == SEQ_START_TOKEN) | 
 | 4063 | 		dev = first_net_device_rcu(seq_file_net(seq)); | 
 | 4064 | 	else | 
 | 4065 | 		dev = next_net_device_rcu(dev); | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 4066 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4067 | 	++*pos; | 
| Eric Dumazet | ccf4343 | 2011-01-26 18:08:02 +0000 | [diff] [blame] | 4068 | 	return dev; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4069 | } | 
 | 4070 |  | 
 | 4071 | void dev_seq_stop(struct seq_file *seq, void *v) | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 4072 | 	__releases(RCU) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4073 | { | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 4074 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4075 | } | 
 | 4076 |  | 
 | 4077 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) | 
 | 4078 | { | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 4079 | 	struct rtnl_link_stats64 temp; | 
 | 4080 | 	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4081 |  | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 4082 | 	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " | 
 | 4083 | 		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", | 
| Rusty Russell | 5a1b589 | 2007-04-28 21:04:03 -0700 | [diff] [blame] | 4084 | 		   dev->name, stats->rx_bytes, stats->rx_packets, | 
 | 4085 | 		   stats->rx_errors, | 
 | 4086 | 		   stats->rx_dropped + stats->rx_missed_errors, | 
 | 4087 | 		   stats->rx_fifo_errors, | 
 | 4088 | 		   stats->rx_length_errors + stats->rx_over_errors + | 
 | 4089 | 		    stats->rx_crc_errors + stats->rx_frame_errors, | 
 | 4090 | 		   stats->rx_compressed, stats->multicast, | 
 | 4091 | 		   stats->tx_bytes, stats->tx_packets, | 
 | 4092 | 		   stats->tx_errors, stats->tx_dropped, | 
 | 4093 | 		   stats->tx_fifo_errors, stats->collisions, | 
 | 4094 | 		   stats->tx_carrier_errors + | 
 | 4095 | 		    stats->tx_aborted_errors + | 
 | 4096 | 		    stats->tx_window_errors + | 
 | 4097 | 		    stats->tx_heartbeat_errors, | 
 | 4098 | 		   stats->tx_compressed); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4099 | } | 
 | 4100 |  | 
 | 4101 | /* | 
 | 4102 |  *	Called from the PROCfs module. This now uses the new arbitrary sized | 
 | 4103 |  *	/proc/net interface to create /proc/net/dev | 
 | 4104 |  */ | 
 | 4105 | static int dev_seq_show(struct seq_file *seq, void *v) | 
 | 4106 | { | 
 | 4107 | 	if (v == SEQ_START_TOKEN) | 
 | 4108 | 		seq_puts(seq, "Inter-|   Receive                            " | 
 | 4109 | 			      "                    |  Transmit\n" | 
 | 4110 | 			      " face |bytes    packets errs drop fifo frame " | 
 | 4111 | 			      "compressed multicast|bytes    packets errs " | 
 | 4112 | 			      "drop fifo colls carrier compressed\n"); | 
 | 4113 | 	else | 
 | 4114 | 		dev_seq_printf_stats(seq, v); | 
 | 4115 | 	return 0; | 
 | 4116 | } | 
 | 4117 |  | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4118 | static struct softnet_data *softnet_get_online(loff_t *pos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4119 | { | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4120 | 	struct softnet_data *sd = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4121 |  | 
| Mike Travis | 0c0b0ac | 2008-05-02 16:43:08 -0700 | [diff] [blame] | 4122 | 	while (*pos < nr_cpu_ids) | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 4123 | 		if (cpu_online(*pos)) { | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4124 | 			sd = &per_cpu(softnet_data, *pos); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4125 | 			break; | 
 | 4126 | 		} else | 
 | 4127 | 			++*pos; | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4128 | 	return sd; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4129 | } | 
 | 4130 |  | 
 | 4131 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) | 
 | 4132 | { | 
 | 4133 | 	return softnet_get_online(pos); | 
 | 4134 | } | 
 | 4135 |  | 
 | 4136 | static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 
 | 4137 | { | 
 | 4138 | 	++*pos; | 
 | 4139 | 	return softnet_get_online(pos); | 
 | 4140 | } | 
 | 4141 |  | 
 | 4142 | static void softnet_seq_stop(struct seq_file *seq, void *v) | 
 | 4143 | { | 
 | 4144 | } | 
 | 4145 |  | 
 | 4146 | static int softnet_seq_show(struct seq_file *seq, void *v) | 
 | 4147 | { | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4148 | 	struct softnet_data *sd = v; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4149 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 4150 | 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4151 | 		   sd->processed, sd->dropped, sd->time_squeeze, 0, | 
| Stephen Hemminger | c1ebcdb | 2005-06-23 20:08:59 -0700 | [diff] [blame] | 4152 | 		   0, 0, 0, 0, /* was fastroute */ | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 4153 | 		   sd->cpu_collision, sd->received_rps); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4154 | 	return 0; | 
 | 4155 | } | 
 | 4156 |  | 
| Stephen Hemminger | f690808 | 2007-03-12 14:34:29 -0700 | [diff] [blame] | 4157 | static const struct seq_operations dev_seq_ops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4158 | 	.start = dev_seq_start, | 
 | 4159 | 	.next  = dev_seq_next, | 
 | 4160 | 	.stop  = dev_seq_stop, | 
 | 4161 | 	.show  = dev_seq_show, | 
 | 4162 | }; | 
 | 4163 |  | 
 | 4164 | static int dev_seq_open(struct inode *inode, struct file *file) | 
 | 4165 | { | 
| Denis V. Lunev | e372c41 | 2007-11-19 22:31:54 -0800 | [diff] [blame] | 4166 | 	return seq_open_net(inode, file, &dev_seq_ops, | 
 | 4167 | 			    sizeof(struct seq_net_private)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4168 | } | 
 | 4169 |  | 
| Arjan van de Ven | 9a32144 | 2007-02-12 00:55:35 -0800 | [diff] [blame] | 4170 | static const struct file_operations dev_seq_fops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4171 | 	.owner	 = THIS_MODULE, | 
 | 4172 | 	.open    = dev_seq_open, | 
 | 4173 | 	.read    = seq_read, | 
 | 4174 | 	.llseek  = seq_lseek, | 
| Denis V. Lunev | e372c41 | 2007-11-19 22:31:54 -0800 | [diff] [blame] | 4175 | 	.release = seq_release_net, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4176 | }; | 
 | 4177 |  | 
| Stephen Hemminger | f690808 | 2007-03-12 14:34:29 -0700 | [diff] [blame] | 4178 | static const struct seq_operations softnet_seq_ops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4179 | 	.start = softnet_seq_start, | 
 | 4180 | 	.next  = softnet_seq_next, | 
 | 4181 | 	.stop  = softnet_seq_stop, | 
 | 4182 | 	.show  = softnet_seq_show, | 
 | 4183 | }; | 
 | 4184 |  | 
 | 4185 | static int softnet_seq_open(struct inode *inode, struct file *file) | 
 | 4186 | { | 
 | 4187 | 	return seq_open(file, &softnet_seq_ops); | 
 | 4188 | } | 
 | 4189 |  | 
| Arjan van de Ven | 9a32144 | 2007-02-12 00:55:35 -0800 | [diff] [blame] | 4190 | static const struct file_operations softnet_seq_fops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4191 | 	.owner	 = THIS_MODULE, | 
 | 4192 | 	.open    = softnet_seq_open, | 
 | 4193 | 	.read    = seq_read, | 
 | 4194 | 	.llseek  = seq_lseek, | 
 | 4195 | 	.release = seq_release, | 
 | 4196 | }; | 
 | 4197 |  | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4198 | static void *ptype_get_idx(loff_t pos) | 
 | 4199 | { | 
 | 4200 | 	struct packet_type *pt = NULL; | 
 | 4201 | 	loff_t i = 0; | 
 | 4202 | 	int t; | 
 | 4203 |  | 
 | 4204 | 	list_for_each_entry_rcu(pt, &ptype_all, list) { | 
 | 4205 | 		if (i == pos) | 
 | 4206 | 			return pt; | 
 | 4207 | 		++i; | 
 | 4208 | 	} | 
 | 4209 |  | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 4210 | 	for (t = 0; t < PTYPE_HASH_SIZE; t++) { | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4211 | 		list_for_each_entry_rcu(pt, &ptype_base[t], list) { | 
 | 4212 | 			if (i == pos) | 
 | 4213 | 				return pt; | 
 | 4214 | 			++i; | 
 | 4215 | 		} | 
 | 4216 | 	} | 
 | 4217 | 	return NULL; | 
 | 4218 | } | 
 | 4219 |  | 
 | 4220 | static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) | 
| Stephen Hemminger | 72348a4 | 2008-01-21 02:27:29 -0800 | [diff] [blame] | 4221 | 	__acquires(RCU) | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4222 | { | 
 | 4223 | 	rcu_read_lock(); | 
 | 4224 | 	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; | 
 | 4225 | } | 
 | 4226 |  | 
 | 4227 | static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 
 | 4228 | { | 
 | 4229 | 	struct packet_type *pt; | 
 | 4230 | 	struct list_head *nxt; | 
 | 4231 | 	int hash; | 
 | 4232 |  | 
 | 4233 | 	++*pos; | 
 | 4234 | 	if (v == SEQ_START_TOKEN) | 
 | 4235 | 		return ptype_get_idx(0); | 
 | 4236 |  | 
 | 4237 | 	pt = v; | 
 | 4238 | 	nxt = pt->list.next; | 
 | 4239 | 	if (pt->type == htons(ETH_P_ALL)) { | 
 | 4240 | 		if (nxt != &ptype_all) | 
 | 4241 | 			goto found; | 
 | 4242 | 		hash = 0; | 
 | 4243 | 		nxt = ptype_base[0].next; | 
 | 4244 | 	} else | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 4245 | 		hash = ntohs(pt->type) & PTYPE_HASH_MASK; | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4246 |  | 
 | 4247 | 	while (nxt == &ptype_base[hash]) { | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 4248 | 		if (++hash >= PTYPE_HASH_SIZE) | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4249 | 			return NULL; | 
 | 4250 | 		nxt = ptype_base[hash].next; | 
 | 4251 | 	} | 
 | 4252 | found: | 
 | 4253 | 	return list_entry(nxt, struct packet_type, list); | 
 | 4254 | } | 
 | 4255 |  | 
 | 4256 | static void ptype_seq_stop(struct seq_file *seq, void *v) | 
| Stephen Hemminger | 72348a4 | 2008-01-21 02:27:29 -0800 | [diff] [blame] | 4257 | 	__releases(RCU) | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4258 | { | 
 | 4259 | 	rcu_read_unlock(); | 
 | 4260 | } | 
 | 4261 |  | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4262 | static int ptype_seq_show(struct seq_file *seq, void *v) | 
 | 4263 | { | 
 | 4264 | 	struct packet_type *pt = v; | 
 | 4265 |  | 
 | 4266 | 	if (v == SEQ_START_TOKEN) | 
 | 4267 | 		seq_puts(seq, "Type Device      Function\n"); | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 4268 | 	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4269 | 		if (pt->type == htons(ETH_P_ALL)) | 
 | 4270 | 			seq_puts(seq, "ALL "); | 
 | 4271 | 		else | 
 | 4272 | 			seq_printf(seq, "%04x", ntohs(pt->type)); | 
 | 4273 |  | 
| Alexey Dobriyan | 908cd2d | 2008-11-16 19:50:35 -0800 | [diff] [blame] | 4274 | 		seq_printf(seq, " %-8s %pF\n", | 
 | 4275 | 			   pt->dev ? pt->dev->name : "", pt->func); | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4276 | 	} | 
 | 4277 |  | 
 | 4278 | 	return 0; | 
 | 4279 | } | 
 | 4280 |  | 
 | 4281 | static const struct seq_operations ptype_seq_ops = { | 
 | 4282 | 	.start = ptype_seq_start, | 
 | 4283 | 	.next  = ptype_seq_next, | 
 | 4284 | 	.stop  = ptype_seq_stop, | 
 | 4285 | 	.show  = ptype_seq_show, | 
 | 4286 | }; | 
 | 4287 |  | 
 | 4288 | static int ptype_seq_open(struct inode *inode, struct file *file) | 
 | 4289 | { | 
| Pavel Emelyanov | 2feb27d | 2008-03-24 14:57:45 -0700 | [diff] [blame] | 4290 | 	return seq_open_net(inode, file, &ptype_seq_ops, | 
 | 4291 | 			sizeof(struct seq_net_private)); | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4292 | } | 
 | 4293 |  | 
 | 4294 | static const struct file_operations ptype_seq_fops = { | 
 | 4295 | 	.owner	 = THIS_MODULE, | 
 | 4296 | 	.open    = ptype_seq_open, | 
 | 4297 | 	.read    = seq_read, | 
 | 4298 | 	.llseek  = seq_lseek, | 
| Pavel Emelyanov | 2feb27d | 2008-03-24 14:57:45 -0700 | [diff] [blame] | 4299 | 	.release = seq_release_net, | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4300 | }; | 
 | 4301 |  | 
 | 4302 |  | 
| Pavel Emelyanov | 4665079 | 2007-10-08 20:38:39 -0700 | [diff] [blame] | 4303 | static int __net_init dev_proc_net_init(struct net *net) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4304 | { | 
 | 4305 | 	int rc = -ENOMEM; | 
 | 4306 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4307 | 	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4308 | 		goto out; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4309 | 	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4310 | 		goto out_dev; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4311 | 	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) | 
| Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 4312 | 		goto out_softnet; | 
| Stephen Hemminger | 0e1256f | 2007-03-12 14:35:37 -0700 | [diff] [blame] | 4313 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4314 | 	if (wext_proc_init(net)) | 
| Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 4315 | 		goto out_ptype; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4316 | 	rc = 0; | 
 | 4317 | out: | 
 | 4318 | 	return rc; | 
| Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 4319 | out_ptype: | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4320 | 	proc_net_remove(net, "ptype"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4321 | out_softnet: | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4322 | 	proc_net_remove(net, "softnet_stat"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4323 | out_dev: | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4324 | 	proc_net_remove(net, "dev"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4325 | 	goto out; | 
 | 4326 | } | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4327 |  | 
| Pavel Emelyanov | 4665079 | 2007-10-08 20:38:39 -0700 | [diff] [blame] | 4328 | static void __net_exit dev_proc_net_exit(struct net *net) | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4329 | { | 
 | 4330 | 	wext_proc_exit(net); | 
 | 4331 |  | 
 | 4332 | 	proc_net_remove(net, "ptype"); | 
 | 4333 | 	proc_net_remove(net, "softnet_stat"); | 
 | 4334 | 	proc_net_remove(net, "dev"); | 
 | 4335 | } | 
 | 4336 |  | 
| Denis V. Lunev | 022cbae | 2007-11-13 03:23:50 -0800 | [diff] [blame] | 4337 | static struct pernet_operations __net_initdata dev_proc_ops = { | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4338 | 	.init = dev_proc_net_init, | 
 | 4339 | 	.exit = dev_proc_net_exit, | 
 | 4340 | }; | 
 | 4341 |  | 
 | 4342 | static int __init dev_proc_init(void) | 
 | 4343 | { | 
 | 4344 | 	return register_pernet_subsys(&dev_proc_ops); | 
 | 4345 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4346 | #else | 
 | 4347 | #define dev_proc_init() 0 | 
 | 4348 | #endif	/* CONFIG_PROC_FS */ | 
 | 4349 |  | 
 | 4350 |  | 
 | 4351 | /** | 
| Jiri Pirko | 1765a57 | 2011-02-12 06:48:36 +0000 | [diff] [blame^] | 4352 |  *	netdev_set_master	-	set up master pointer | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4353 |  *	@slave: slave device | 
 | 4354 |  *	@master: new master device | 
 | 4355 |  * | 
 | 4356 |  *	Changes the master device of the slave. Pass %NULL to break the | 
 | 4357 |  *	bonding. The caller must hold the RTNL semaphore. On a failure | 
 | 4358 |  *	a negative errno code is returned. On success the reference counts | 
| Jiri Pirko | 1765a57 | 2011-02-12 06:48:36 +0000 | [diff] [blame^] | 4359 |  *	are adjusted and the function returns zero. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4360 |  */ | 
 | 4361 | int netdev_set_master(struct net_device *slave, struct net_device *master) | 
 | 4362 | { | 
 | 4363 | 	struct net_device *old = slave->master; | 
 | 4364 |  | 
 | 4365 | 	ASSERT_RTNL(); | 
 | 4366 |  | 
 | 4367 | 	if (master) { | 
 | 4368 | 		if (old) | 
 | 4369 | 			return -EBUSY; | 
 | 4370 | 		dev_hold(master); | 
 | 4371 | 	} | 
 | 4372 |  | 
 | 4373 | 	slave->master = master; | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 4374 |  | 
| Eric Dumazet | 283f2fe | 2010-03-18 13:37:40 +0000 | [diff] [blame] | 4375 | 	if (old) { | 
 | 4376 | 		synchronize_net(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4377 | 		dev_put(old); | 
| Eric Dumazet | 283f2fe | 2010-03-18 13:37:40 +0000 | [diff] [blame] | 4378 | 	} | 
| Jiri Pirko | 1765a57 | 2011-02-12 06:48:36 +0000 | [diff] [blame^] | 4379 | 	return 0; | 
 | 4380 | } | 
 | 4381 | EXPORT_SYMBOL(netdev_set_master); | 
 | 4382 |  | 
 | 4383 | /** | 
 | 4384 |  *	netdev_set_bond_master	-	set up bonding master/slave pair | 
 | 4385 |  *	@slave: slave device | 
 | 4386 |  *	@master: new master device | 
 | 4387 |  * | 
 | 4388 |  *	Changes the master device of the slave. Pass %NULL to break the | 
 | 4389 |  *	bonding. The caller must hold the RTNL semaphore. On a failure | 
 | 4390 |  *	a negative errno code is returned. On success %RTM_NEWLINK is sent | 
 | 4391 |  *	to the routing socket and the function returns zero. | 
 | 4392 |  */ | 
 | 4393 | int netdev_set_bond_master(struct net_device *slave, struct net_device *master) | 
 | 4394 | { | 
 | 4395 | 	int err; | 
 | 4396 |  | 
 | 4397 | 	ASSERT_RTNL(); | 
 | 4398 |  | 
 | 4399 | 	err = netdev_set_master(slave, master); | 
 | 4400 | 	if (err) | 
 | 4401 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4402 | 	if (master) | 
 | 4403 | 		slave->flags |= IFF_SLAVE; | 
 | 4404 | 	else | 
 | 4405 | 		slave->flags &= ~IFF_SLAVE; | 
 | 4406 |  | 
 | 4407 | 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); | 
 | 4408 | 	return 0; | 
 | 4409 | } | 
| Jiri Pirko | 1765a57 | 2011-02-12 06:48:36 +0000 | [diff] [blame^] | 4410 | EXPORT_SYMBOL(netdev_set_bond_master); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4411 |  | 
| Patrick McHardy | b6c40d6 | 2008-10-07 15:26:48 -0700 | [diff] [blame] | 4412 | static void dev_change_rx_flags(struct net_device *dev, int flags) | 
 | 4413 | { | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4414 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
 | 4415 |  | 
 | 4416 | 	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) | 
 | 4417 | 		ops->ndo_change_rx_flags(dev, flags); | 
| Patrick McHardy | b6c40d6 | 2008-10-07 15:26:48 -0700 | [diff] [blame] | 4418 | } | 
 | 4419 |  | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4420 | static int __dev_set_promiscuity(struct net_device *dev, int inc) | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4421 | { | 
 | 4422 | 	unsigned short old_flags = dev->flags; | 
| David Howells | 8192b0c | 2008-11-14 10:39:10 +1100 | [diff] [blame] | 4423 | 	uid_t uid; | 
 | 4424 | 	gid_t gid; | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4425 |  | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4426 | 	ASSERT_RTNL(); | 
 | 4427 |  | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4428 | 	dev->flags |= IFF_PROMISC; | 
 | 4429 | 	dev->promiscuity += inc; | 
 | 4430 | 	if (dev->promiscuity == 0) { | 
 | 4431 | 		/* | 
 | 4432 | 		 * Avoid overflow. | 
 | 4433 | 		 * If inc causes overflow, untouch promisc and return error. | 
 | 4434 | 		 */ | 
 | 4435 | 		if (inc < 0) | 
 | 4436 | 			dev->flags &= ~IFF_PROMISC; | 
 | 4437 | 		else { | 
 | 4438 | 			dev->promiscuity -= inc; | 
 | 4439 | 			printk(KERN_WARNING "%s: promiscuity touches roof, " | 
 | 4440 | 				"set promiscuity failed, promiscuity feature " | 
 | 4441 | 				"of device might be broken.\n", dev->name); | 
 | 4442 | 			return -EOVERFLOW; | 
 | 4443 | 		} | 
 | 4444 | 	} | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4445 | 	if (dev->flags != old_flags) { | 
 | 4446 | 		printk(KERN_INFO "device %s %s promiscuous mode\n", | 
 | 4447 | 		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" : | 
 | 4448 | 							       "left"); | 
| David Howells | 8192b0c | 2008-11-14 10:39:10 +1100 | [diff] [blame] | 4449 | 		if (audit_enabled) { | 
 | 4450 | 			current_uid_gid(&uid, &gid); | 
| Klaus Heinrich Kiwi | 7759db8 | 2008-01-23 22:57:45 -0500 | [diff] [blame] | 4451 | 			audit_log(current->audit_context, GFP_ATOMIC, | 
 | 4452 | 				AUDIT_ANOM_PROMISCUOUS, | 
 | 4453 | 				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", | 
 | 4454 | 				dev->name, (dev->flags & IFF_PROMISC), | 
 | 4455 | 				(old_flags & IFF_PROMISC), | 
 | 4456 | 				audit_get_loginuid(current), | 
| David Howells | 8192b0c | 2008-11-14 10:39:10 +1100 | [diff] [blame] | 4457 | 				uid, gid, | 
| Klaus Heinrich Kiwi | 7759db8 | 2008-01-23 22:57:45 -0500 | [diff] [blame] | 4458 | 				audit_get_sessionid(current)); | 
| David Howells | 8192b0c | 2008-11-14 10:39:10 +1100 | [diff] [blame] | 4459 | 		} | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4460 |  | 
| Patrick McHardy | b6c40d6 | 2008-10-07 15:26:48 -0700 | [diff] [blame] | 4461 | 		dev_change_rx_flags(dev, IFF_PROMISC); | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4462 | 	} | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4463 | 	return 0; | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4464 | } | 
 | 4465 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4466 | /** | 
 | 4467 |  *	dev_set_promiscuity	- update promiscuity count on a device | 
 | 4468 |  *	@dev: device | 
 | 4469 |  *	@inc: modifier | 
 | 4470 |  * | 
| Stephen Hemminger | 3041a06 | 2006-05-26 13:25:24 -0700 | [diff] [blame] | 4471 |  *	Add or remove promiscuity from a device. While the count in the device | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4472 |  *	remains above zero the interface remains promiscuous. Once it hits zero | 
 | 4473 |  *	the device reverts back to normal filtering operation. A negative inc | 
 | 4474 |  *	value is used to drop promiscuity on the device. | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4475 |  *	Return 0 if successful or a negative errno code on error. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4476 |  */ | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4477 | int dev_set_promiscuity(struct net_device *dev, int inc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4478 | { | 
 | 4479 | 	unsigned short old_flags = dev->flags; | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4480 | 	int err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4481 |  | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4482 | 	err = __dev_set_promiscuity(dev, inc); | 
| Patrick McHardy | 4b5a698 | 2008-07-06 15:49:08 -0700 | [diff] [blame] | 4483 | 	if (err < 0) | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4484 | 		return err; | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4485 | 	if (dev->flags != old_flags) | 
 | 4486 | 		dev_set_rx_mode(dev); | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4487 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4488 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4489 | EXPORT_SYMBOL(dev_set_promiscuity); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4490 |  | 
 | 4491 | /** | 
 | 4492 |  *	dev_set_allmulti	- update allmulti count on a device | 
 | 4493 |  *	@dev: device | 
 | 4494 |  *	@inc: modifier | 
 | 4495 |  * | 
 | 4496 |  *	Add or remove reception of all multicast frames to a device. While the | 
 | 4497 |  *	count in the device remains above zero the interface remains listening | 
 | 4498 |  *	to all interfaces. Once it hits zero the device reverts back to normal | 
 | 4499 |  *	filtering operation. A negative @inc value is used to drop the counter | 
 | 4500 |  *	when releasing a resource needing all multicasts. | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4501 |  *	Return 0 if successful or a negative errno code on error. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4502 |  */ | 
 | 4503 |  | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4504 | int dev_set_allmulti(struct net_device *dev, int inc) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4505 | { | 
 | 4506 | 	unsigned short old_flags = dev->flags; | 
 | 4507 |  | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4508 | 	ASSERT_RTNL(); | 
 | 4509 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4510 | 	dev->flags |= IFF_ALLMULTI; | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4511 | 	dev->allmulti += inc; | 
 | 4512 | 	if (dev->allmulti == 0) { | 
 | 4513 | 		/* | 
 | 4514 | 		 * Avoid overflow. | 
 | 4515 | 		 * If inc causes overflow, untouch allmulti and return error. | 
 | 4516 | 		 */ | 
 | 4517 | 		if (inc < 0) | 
 | 4518 | 			dev->flags &= ~IFF_ALLMULTI; | 
 | 4519 | 		else { | 
 | 4520 | 			dev->allmulti -= inc; | 
 | 4521 | 			printk(KERN_WARNING "%s: allmulti touches roof, " | 
 | 4522 | 				"set allmulti failed, allmulti feature of " | 
 | 4523 | 				"device might be broken.\n", dev->name); | 
 | 4524 | 			return -EOVERFLOW; | 
 | 4525 | 		} | 
 | 4526 | 	} | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4527 | 	if (dev->flags ^ old_flags) { | 
| Patrick McHardy | b6c40d6 | 2008-10-07 15:26:48 -0700 | [diff] [blame] | 4528 | 		dev_change_rx_flags(dev, IFF_ALLMULTI); | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4529 | 		dev_set_rx_mode(dev); | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4530 | 	} | 
| Wang Chen | dad9b33 | 2008-06-18 01:48:28 -0700 | [diff] [blame] | 4531 | 	return 0; | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4532 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4533 | EXPORT_SYMBOL(dev_set_allmulti); | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4534 |  | 
 | 4535 | /* | 
 | 4536 |  *	Upload unicast and multicast address lists to device and | 
 | 4537 |  *	configure RX filtering. When the device doesn't support unicast | 
| Joe Perches | 53ccaae | 2007-12-20 14:02:06 -0800 | [diff] [blame] | 4538 |  *	filtering it is put in promiscuous mode while unicast addresses | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4539 |  *	are present. | 
 | 4540 |  */ | 
 | 4541 | void __dev_set_rx_mode(struct net_device *dev) | 
 | 4542 | { | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4543 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
 | 4544 |  | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4545 | 	/* dev_open will call this function so the list will stay sane. */ | 
 | 4546 | 	if (!(dev->flags&IFF_UP)) | 
 | 4547 | 		return; | 
 | 4548 |  | 
 | 4549 | 	if (!netif_device_present(dev)) | 
| YOSHIFUJI Hideaki | 40b77c9 | 2007-07-19 10:43:23 +0900 | [diff] [blame] | 4550 | 		return; | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4551 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4552 | 	if (ops->ndo_set_rx_mode) | 
 | 4553 | 		ops->ndo_set_rx_mode(dev); | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4554 | 	else { | 
 | 4555 | 		/* Unicast addresses changes may only happen under the rtnl, | 
 | 4556 | 		 * therefore calling __dev_set_promiscuity here is safe. | 
 | 4557 | 		 */ | 
| Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 4558 | 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) { | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4559 | 			__dev_set_promiscuity(dev, 1); | 
 | 4560 | 			dev->uc_promisc = 1; | 
| Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 4561 | 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) { | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4562 | 			__dev_set_promiscuity(dev, -1); | 
 | 4563 | 			dev->uc_promisc = 0; | 
 | 4564 | 		} | 
 | 4565 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4566 | 		if (ops->ndo_set_multicast_list) | 
 | 4567 | 			ops->ndo_set_multicast_list(dev); | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4568 | 	} | 
 | 4569 | } | 
 | 4570 |  | 
 | 4571 | void dev_set_rx_mode(struct net_device *dev) | 
 | 4572 | { | 
| David S. Miller | b9e4085 | 2008-07-15 00:15:08 -0700 | [diff] [blame] | 4573 | 	netif_addr_lock_bh(dev); | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4574 | 	__dev_set_rx_mode(dev); | 
| David S. Miller | b9e4085 | 2008-07-15 00:15:08 -0700 | [diff] [blame] | 4575 | 	netif_addr_unlock_bh(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4576 | } | 
 | 4577 |  | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 4578 | /** | 
 | 4579 |  *	dev_get_flags - get flags reported to userspace | 
 | 4580 |  *	@dev: device | 
 | 4581 |  * | 
 | 4582 |  *	Get the combination of flag bits exported through APIs to userspace. | 
 | 4583 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4584 | unsigned dev_get_flags(const struct net_device *dev) | 
 | 4585 | { | 
 | 4586 | 	unsigned flags; | 
 | 4587 |  | 
 | 4588 | 	flags = (dev->flags & ~(IFF_PROMISC | | 
 | 4589 | 				IFF_ALLMULTI | | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 4590 | 				IFF_RUNNING | | 
 | 4591 | 				IFF_LOWER_UP | | 
 | 4592 | 				IFF_DORMANT)) | | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4593 | 		(dev->gflags & (IFF_PROMISC | | 
 | 4594 | 				IFF_ALLMULTI)); | 
 | 4595 |  | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 4596 | 	if (netif_running(dev)) { | 
 | 4597 | 		if (netif_oper_up(dev)) | 
 | 4598 | 			flags |= IFF_RUNNING; | 
 | 4599 | 		if (netif_carrier_ok(dev)) | 
 | 4600 | 			flags |= IFF_LOWER_UP; | 
 | 4601 | 		if (netif_dormant(dev)) | 
 | 4602 | 			flags |= IFF_DORMANT; | 
 | 4603 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4604 |  | 
 | 4605 | 	return flags; | 
 | 4606 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4607 | EXPORT_SYMBOL(dev_get_flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4608 |  | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 4609 | int __dev_change_flags(struct net_device *dev, unsigned int flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4610 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4611 | 	int old_flags = dev->flags; | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 4612 | 	int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4613 |  | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4614 | 	ASSERT_RTNL(); | 
 | 4615 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4616 | 	/* | 
 | 4617 | 	 *	Set the flags on our device. | 
 | 4618 | 	 */ | 
 | 4619 |  | 
 | 4620 | 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | | 
 | 4621 | 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | | 
 | 4622 | 			       IFF_AUTOMEDIA)) | | 
 | 4623 | 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | | 
 | 4624 | 				    IFF_ALLMULTI)); | 
 | 4625 |  | 
 | 4626 | 	/* | 
 | 4627 | 	 *	Load in the correct multicast list now the flags have changed. | 
 | 4628 | 	 */ | 
 | 4629 |  | 
| Patrick McHardy | b6c40d6 | 2008-10-07 15:26:48 -0700 | [diff] [blame] | 4630 | 	if ((old_flags ^ flags) & IFF_MULTICAST) | 
 | 4631 | 		dev_change_rx_flags(dev, IFF_MULTICAST); | 
| Patrick McHardy | 2402345 | 2007-07-14 18:51:31 -0700 | [diff] [blame] | 4632 |  | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4633 | 	dev_set_rx_mode(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4634 |  | 
 | 4635 | 	/* | 
 | 4636 | 	 *	Have we downed the interface. We handle IFF_UP ourselves | 
 | 4637 | 	 *	according to user attempts to set it, rather than blindly | 
 | 4638 | 	 *	setting it. | 
 | 4639 | 	 */ | 
 | 4640 |  | 
 | 4641 | 	ret = 0; | 
 | 4642 | 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */ | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 4643 | 		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4644 |  | 
 | 4645 | 		if (!ret) | 
| Patrick McHardy | 4417da6 | 2007-06-27 01:28:10 -0700 | [diff] [blame] | 4646 | 			dev_set_rx_mode(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4647 | 	} | 
 | 4648 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4649 | 	if ((flags ^ dev->gflags) & IFF_PROMISC) { | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4650 | 		int inc = (flags & IFF_PROMISC) ? 1 : -1; | 
 | 4651 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4652 | 		dev->gflags ^= IFF_PROMISC; | 
 | 4653 | 		dev_set_promiscuity(dev, inc); | 
 | 4654 | 	} | 
 | 4655 |  | 
 | 4656 | 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI | 
 | 4657 | 	   is important. Some (broken) drivers set IFF_PROMISC, when | 
 | 4658 | 	   IFF_ALLMULTI is requested not asking us and not reporting. | 
 | 4659 | 	 */ | 
 | 4660 | 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) { | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4661 | 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1; | 
 | 4662 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4663 | 		dev->gflags ^= IFF_ALLMULTI; | 
 | 4664 | 		dev_set_allmulti(dev, inc); | 
 | 4665 | 	} | 
 | 4666 |  | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 4667 | 	return ret; | 
 | 4668 | } | 
 | 4669 |  | 
 | 4670 | void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) | 
 | 4671 | { | 
 | 4672 | 	unsigned int changes = dev->flags ^ old_flags; | 
 | 4673 |  | 
 | 4674 | 	if (changes & IFF_UP) { | 
 | 4675 | 		if (dev->flags & IFF_UP) | 
 | 4676 | 			call_netdevice_notifiers(NETDEV_UP, dev); | 
 | 4677 | 		else | 
 | 4678 | 			call_netdevice_notifiers(NETDEV_DOWN, dev); | 
 | 4679 | 	} | 
 | 4680 |  | 
 | 4681 | 	if (dev->flags & IFF_UP && | 
 | 4682 | 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) | 
 | 4683 | 		call_netdevice_notifiers(NETDEV_CHANGE, dev); | 
 | 4684 | } | 
 | 4685 |  | 
 | 4686 | /** | 
 | 4687 |  *	dev_change_flags - change device settings | 
 | 4688 |  *	@dev: device | 
 | 4689 |  *	@flags: device state flags | 
 | 4690 |  * | 
 | 4691 |  *	Change settings on device based state flags. The flags are | 
 | 4692 |  *	in the userspace exported format. | 
 | 4693 |  */ | 
 | 4694 | int dev_change_flags(struct net_device *dev, unsigned flags) | 
 | 4695 | { | 
 | 4696 | 	int ret, changes; | 
 | 4697 | 	int old_flags = dev->flags; | 
 | 4698 |  | 
 | 4699 | 	ret = __dev_change_flags(dev, flags); | 
 | 4700 | 	if (ret < 0) | 
 | 4701 | 		return ret; | 
 | 4702 |  | 
 | 4703 | 	changes = old_flags ^ dev->flags; | 
| Thomas Graf | 7c355f5 | 2007-06-05 16:03:03 -0700 | [diff] [blame] | 4704 | 	if (changes) | 
 | 4705 | 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4706 |  | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 4707 | 	__dev_notify_flags(dev, old_flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4708 | 	return ret; | 
 | 4709 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4710 | EXPORT_SYMBOL(dev_change_flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4711 |  | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 4712 | /** | 
 | 4713 |  *	dev_set_mtu - Change maximum transfer unit | 
 | 4714 |  *	@dev: device | 
 | 4715 |  *	@new_mtu: new transfer unit | 
 | 4716 |  * | 
 | 4717 |  *	Change the maximum transfer size of the network device. | 
 | 4718 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4719 | int dev_set_mtu(struct net_device *dev, int new_mtu) | 
 | 4720 | { | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4721 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4722 | 	int err; | 
 | 4723 |  | 
 | 4724 | 	if (new_mtu == dev->mtu) | 
 | 4725 | 		return 0; | 
 | 4726 |  | 
 | 4727 | 	/*	MTU must be positive.	 */ | 
 | 4728 | 	if (new_mtu < 0) | 
 | 4729 | 		return -EINVAL; | 
 | 4730 |  | 
 | 4731 | 	if (!netif_device_present(dev)) | 
 | 4732 | 		return -ENODEV; | 
 | 4733 |  | 
 | 4734 | 	err = 0; | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4735 | 	if (ops->ndo_change_mtu) | 
 | 4736 | 		err = ops->ndo_change_mtu(dev, new_mtu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4737 | 	else | 
 | 4738 | 		dev->mtu = new_mtu; | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4739 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4740 | 	if (!err && dev->flags & IFF_UP) | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 4741 | 		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4742 | 	return err; | 
 | 4743 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4744 | EXPORT_SYMBOL(dev_set_mtu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4745 |  | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 4746 | /** | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 4747 |  *	dev_set_group - Change group this device belongs to | 
 | 4748 |  *	@dev: device | 
 | 4749 |  *	@new_group: group this device should belong to | 
 | 4750 |  */ | 
 | 4751 | void dev_set_group(struct net_device *dev, int new_group) | 
 | 4752 | { | 
 | 4753 | 	dev->group = new_group; | 
 | 4754 | } | 
 | 4755 | EXPORT_SYMBOL(dev_set_group); | 
 | 4756 |  | 
 | 4757 | /** | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 4758 |  *	dev_set_mac_address - Change Media Access Control Address | 
 | 4759 |  *	@dev: device | 
 | 4760 |  *	@sa: new address | 
 | 4761 |  * | 
 | 4762 |  *	Change the hardware (MAC) address of the device | 
 | 4763 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4764 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | 
 | 4765 | { | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4766 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4767 | 	int err; | 
 | 4768 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4769 | 	if (!ops->ndo_set_mac_address) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4770 | 		return -EOPNOTSUPP; | 
 | 4771 | 	if (sa->sa_family != dev->type) | 
 | 4772 | 		return -EINVAL; | 
 | 4773 | 	if (!netif_device_present(dev)) | 
 | 4774 | 		return -ENODEV; | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 4775 | 	err = ops->ndo_set_mac_address(dev, sa); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4776 | 	if (!err) | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 4777 | 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4778 | 	return err; | 
 | 4779 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4780 | EXPORT_SYMBOL(dev_set_mac_address); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4781 |  | 
 | 4782 | /* | 
| Eric Dumazet | 3710bec | 2009-11-01 19:42:09 +0000 | [diff] [blame] | 4783 |  *	Perform the SIOCxIFxxx calls, inside rcu_read_lock() | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4784 |  */ | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4785 | static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4786 | { | 
 | 4787 | 	int err; | 
| Eric Dumazet | 3710bec | 2009-11-01 19:42:09 +0000 | [diff] [blame] | 4788 | 	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4789 |  | 
 | 4790 | 	if (!dev) | 
 | 4791 | 		return -ENODEV; | 
 | 4792 |  | 
 | 4793 | 	switch (cmd) { | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4794 | 	case SIOCGIFFLAGS:	/* Get interface flags */ | 
 | 4795 | 		ifr->ifr_flags = (short) dev_get_flags(dev); | 
 | 4796 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4797 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4798 | 	case SIOCGIFMETRIC:	/* Get the metric on the interface | 
 | 4799 | 				   (currently unused) */ | 
 | 4800 | 		ifr->ifr_metric = 0; | 
 | 4801 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4802 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4803 | 	case SIOCGIFMTU:	/* Get the MTU of a device */ | 
 | 4804 | 		ifr->ifr_mtu = dev->mtu; | 
 | 4805 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4806 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4807 | 	case SIOCGIFHWADDR: | 
 | 4808 | 		if (!dev->addr_len) | 
 | 4809 | 			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); | 
 | 4810 | 		else | 
 | 4811 | 			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, | 
 | 4812 | 			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | 
 | 4813 | 		ifr->ifr_hwaddr.sa_family = dev->type; | 
 | 4814 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4815 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4816 | 	case SIOCGIFSLAVE: | 
 | 4817 | 		err = -EINVAL; | 
 | 4818 | 		break; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4819 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4820 | 	case SIOCGIFMAP: | 
 | 4821 | 		ifr->ifr_map.mem_start = dev->mem_start; | 
 | 4822 | 		ifr->ifr_map.mem_end   = dev->mem_end; | 
 | 4823 | 		ifr->ifr_map.base_addr = dev->base_addr; | 
 | 4824 | 		ifr->ifr_map.irq       = dev->irq; | 
 | 4825 | 		ifr->ifr_map.dma       = dev->dma; | 
 | 4826 | 		ifr->ifr_map.port      = dev->if_port; | 
 | 4827 | 		return 0; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4828 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4829 | 	case SIOCGIFINDEX: | 
 | 4830 | 		ifr->ifr_ifindex = dev->ifindex; | 
 | 4831 | 		return 0; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4832 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4833 | 	case SIOCGIFTXQLEN: | 
 | 4834 | 		ifr->ifr_qlen = dev->tx_queue_len; | 
 | 4835 | 		return 0; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4836 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4837 | 	default: | 
 | 4838 | 		/* dev_ioctl() should ensure this case | 
 | 4839 | 		 * is never reached | 
 | 4840 | 		 */ | 
 | 4841 | 		WARN_ON(1); | 
 | 4842 | 		err = -EINVAL; | 
 | 4843 | 		break; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4844 |  | 
 | 4845 | 	} | 
 | 4846 | 	return err; | 
 | 4847 | } | 
 | 4848 |  | 
 | 4849 | /* | 
 | 4850 |  *	Perform the SIOCxIFxxx calls, inside rtnl_lock() | 
 | 4851 |  */ | 
 | 4852 | static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | 
 | 4853 | { | 
 | 4854 | 	int err; | 
 | 4855 | 	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); | 
| Jarek Poplawski | 5f2f6da | 2008-12-22 19:35:28 -0800 | [diff] [blame] | 4856 | 	const struct net_device_ops *ops; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4857 |  | 
 | 4858 | 	if (!dev) | 
 | 4859 | 		return -ENODEV; | 
 | 4860 |  | 
| Jarek Poplawski | 5f2f6da | 2008-12-22 19:35:28 -0800 | [diff] [blame] | 4861 | 	ops = dev->netdev_ops; | 
 | 4862 |  | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4863 | 	switch (cmd) { | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4864 | 	case SIOCSIFFLAGS:	/* Set interface flags */ | 
 | 4865 | 		return dev_change_flags(dev, ifr->ifr_flags); | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4866 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4867 | 	case SIOCSIFMETRIC:	/* Set the metric on the interface | 
 | 4868 | 				   (currently unused) */ | 
 | 4869 | 		return -EOPNOTSUPP; | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4870 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4871 | 	case SIOCSIFMTU:	/* Set the MTU of a device */ | 
 | 4872 | 		return dev_set_mtu(dev, ifr->ifr_mtu); | 
| Jeff Garzik | 14e3e07 | 2007-10-08 00:06:32 -0700 | [diff] [blame] | 4873 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4874 | 	case SIOCSIFHWADDR: | 
 | 4875 | 		return dev_set_mac_address(dev, &ifr->ifr_hwaddr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4876 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4877 | 	case SIOCSIFHWBROADCAST: | 
 | 4878 | 		if (ifr->ifr_hwaddr.sa_family != dev->type) | 
 | 4879 | 			return -EINVAL; | 
 | 4880 | 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, | 
 | 4881 | 		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | 
 | 4882 | 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | 
 | 4883 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4884 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4885 | 	case SIOCSIFMAP: | 
 | 4886 | 		if (ops->ndo_set_config) { | 
 | 4887 | 			if (!netif_device_present(dev)) | 
 | 4888 | 				return -ENODEV; | 
 | 4889 | 			return ops->ndo_set_config(dev, &ifr->ifr_map); | 
 | 4890 | 		} | 
 | 4891 | 		return -EOPNOTSUPP; | 
 | 4892 |  | 
 | 4893 | 	case SIOCADDMULTI: | 
 | 4894 | 		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | 
 | 4895 | 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | 
 | 4896 | 			return -EINVAL; | 
 | 4897 | 		if (!netif_device_present(dev)) | 
 | 4898 | 			return -ENODEV; | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 4899 | 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4900 |  | 
 | 4901 | 	case SIOCDELMULTI: | 
 | 4902 | 		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | 
 | 4903 | 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | 
 | 4904 | 			return -EINVAL; | 
 | 4905 | 		if (!netif_device_present(dev)) | 
 | 4906 | 			return -ENODEV; | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 4907 | 		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4908 |  | 
 | 4909 | 	case SIOCSIFTXQLEN: | 
 | 4910 | 		if (ifr->ifr_qlen < 0) | 
 | 4911 | 			return -EINVAL; | 
 | 4912 | 		dev->tx_queue_len = ifr->ifr_qlen; | 
 | 4913 | 		return 0; | 
 | 4914 |  | 
 | 4915 | 	case SIOCSIFNAME: | 
 | 4916 | 		ifr->ifr_newname[IFNAMSIZ-1] = '\0'; | 
 | 4917 | 		return dev_change_name(dev, ifr->ifr_newname); | 
 | 4918 |  | 
 | 4919 | 	/* | 
 | 4920 | 	 *	Unknown or private ioctl | 
 | 4921 | 	 */ | 
 | 4922 | 	default: | 
 | 4923 | 		if ((cmd >= SIOCDEVPRIVATE && | 
 | 4924 | 		    cmd <= SIOCDEVPRIVATE + 15) || | 
 | 4925 | 		    cmd == SIOCBONDENSLAVE || | 
 | 4926 | 		    cmd == SIOCBONDRELEASE || | 
 | 4927 | 		    cmd == SIOCBONDSETHWADDR || | 
 | 4928 | 		    cmd == SIOCBONDSLAVEINFOQUERY || | 
 | 4929 | 		    cmd == SIOCBONDINFOQUERY || | 
 | 4930 | 		    cmd == SIOCBONDCHANGEACTIVE || | 
 | 4931 | 		    cmd == SIOCGMIIPHY || | 
 | 4932 | 		    cmd == SIOCGMIIREG || | 
 | 4933 | 		    cmd == SIOCSMIIREG || | 
 | 4934 | 		    cmd == SIOCBRADDIF || | 
 | 4935 | 		    cmd == SIOCBRDELIF || | 
 | 4936 | 		    cmd == SIOCSHWTSTAMP || | 
 | 4937 | 		    cmd == SIOCWANDEV) { | 
 | 4938 | 			err = -EOPNOTSUPP; | 
 | 4939 | 			if (ops->ndo_do_ioctl) { | 
 | 4940 | 				if (netif_device_present(dev)) | 
 | 4941 | 					err = ops->ndo_do_ioctl(dev, ifr, cmd); | 
 | 4942 | 				else | 
 | 4943 | 					err = -ENODEV; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4944 | 			} | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 4945 | 		} else | 
 | 4946 | 			err = -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4947 |  | 
 | 4948 | 	} | 
 | 4949 | 	return err; | 
 | 4950 | } | 
 | 4951 |  | 
 | 4952 | /* | 
 | 4953 |  *	This function handles all "interface"-type I/O control requests. The actual | 
 | 4954 |  *	'doing' part of this is dev_ifsioc above. | 
 | 4955 |  */ | 
 | 4956 |  | 
 | 4957 | /** | 
 | 4958 |  *	dev_ioctl	-	network device ioctl | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 4959 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4960 |  *	@cmd: command to issue | 
 | 4961 |  *	@arg: pointer to a struct ifreq in user space | 
 | 4962 |  * | 
 | 4963 |  *	Issue ioctl functions to devices. This is normally called by the | 
 | 4964 |  *	user space syscall interfaces but can sometimes be useful for | 
 | 4965 |  *	other purposes. The return value is the return from the syscall if | 
 | 4966 |  *	positive or a negative errno code on error. | 
 | 4967 |  */ | 
 | 4968 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4969 | int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4970 | { | 
 | 4971 | 	struct ifreq ifr; | 
 | 4972 | 	int ret; | 
 | 4973 | 	char *colon; | 
 | 4974 |  | 
 | 4975 | 	/* One special case: SIOCGIFCONF takes ifconf argument | 
 | 4976 | 	   and requires shared lock, because it sleeps writing | 
 | 4977 | 	   to user space. | 
 | 4978 | 	 */ | 
 | 4979 |  | 
 | 4980 | 	if (cmd == SIOCGIFCONF) { | 
| Stephen Hemminger | 6756ae4 | 2006-03-20 22:23:58 -0800 | [diff] [blame] | 4981 | 		rtnl_lock(); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4982 | 		ret = dev_ifconf(net, (char __user *) arg); | 
| Stephen Hemminger | 6756ae4 | 2006-03-20 22:23:58 -0800 | [diff] [blame] | 4983 | 		rtnl_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4984 | 		return ret; | 
 | 4985 | 	} | 
 | 4986 | 	if (cmd == SIOCGIFNAME) | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 4987 | 		return dev_ifname(net, (struct ifreq __user *)arg); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4988 |  | 
 | 4989 | 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | 
 | 4990 | 		return -EFAULT; | 
 | 4991 |  | 
 | 4992 | 	ifr.ifr_name[IFNAMSIZ-1] = 0; | 
 | 4993 |  | 
 | 4994 | 	colon = strchr(ifr.ifr_name, ':'); | 
 | 4995 | 	if (colon) | 
 | 4996 | 		*colon = 0; | 
 | 4997 |  | 
 | 4998 | 	/* | 
 | 4999 | 	 *	See which interface the caller is talking about. | 
 | 5000 | 	 */ | 
 | 5001 |  | 
 | 5002 | 	switch (cmd) { | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5003 | 	/* | 
 | 5004 | 	 *	These ioctl calls: | 
 | 5005 | 	 *	- can be done by all. | 
 | 5006 | 	 *	- atomic and do not require locking. | 
 | 5007 | 	 *	- return a value | 
 | 5008 | 	 */ | 
 | 5009 | 	case SIOCGIFFLAGS: | 
 | 5010 | 	case SIOCGIFMETRIC: | 
 | 5011 | 	case SIOCGIFMTU: | 
 | 5012 | 	case SIOCGIFHWADDR: | 
 | 5013 | 	case SIOCGIFSLAVE: | 
 | 5014 | 	case SIOCGIFMAP: | 
 | 5015 | 	case SIOCGIFINDEX: | 
 | 5016 | 	case SIOCGIFTXQLEN: | 
 | 5017 | 		dev_load(net, ifr.ifr_name); | 
| Eric Dumazet | 3710bec | 2009-11-01 19:42:09 +0000 | [diff] [blame] | 5018 | 		rcu_read_lock(); | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5019 | 		ret = dev_ifsioc_locked(net, &ifr, cmd); | 
| Eric Dumazet | 3710bec | 2009-11-01 19:42:09 +0000 | [diff] [blame] | 5020 | 		rcu_read_unlock(); | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5021 | 		if (!ret) { | 
 | 5022 | 			if (colon) | 
 | 5023 | 				*colon = ':'; | 
 | 5024 | 			if (copy_to_user(arg, &ifr, | 
 | 5025 | 					 sizeof(struct ifreq))) | 
 | 5026 | 				ret = -EFAULT; | 
 | 5027 | 		} | 
 | 5028 | 		return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5029 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5030 | 	case SIOCETHTOOL: | 
 | 5031 | 		dev_load(net, ifr.ifr_name); | 
 | 5032 | 		rtnl_lock(); | 
 | 5033 | 		ret = dev_ethtool(net, &ifr); | 
 | 5034 | 		rtnl_unlock(); | 
 | 5035 | 		if (!ret) { | 
 | 5036 | 			if (colon) | 
 | 5037 | 				*colon = ':'; | 
 | 5038 | 			if (copy_to_user(arg, &ifr, | 
 | 5039 | 					 sizeof(struct ifreq))) | 
 | 5040 | 				ret = -EFAULT; | 
 | 5041 | 		} | 
 | 5042 | 		return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5043 |  | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5044 | 	/* | 
 | 5045 | 	 *	These ioctl calls: | 
 | 5046 | 	 *	- require superuser power. | 
 | 5047 | 	 *	- require strict serialization. | 
 | 5048 | 	 *	- return a value | 
 | 5049 | 	 */ | 
 | 5050 | 	case SIOCGMIIPHY: | 
 | 5051 | 	case SIOCGMIIREG: | 
 | 5052 | 	case SIOCSIFNAME: | 
 | 5053 | 		if (!capable(CAP_NET_ADMIN)) | 
 | 5054 | 			return -EPERM; | 
 | 5055 | 		dev_load(net, ifr.ifr_name); | 
 | 5056 | 		rtnl_lock(); | 
 | 5057 | 		ret = dev_ifsioc(net, &ifr, cmd); | 
 | 5058 | 		rtnl_unlock(); | 
 | 5059 | 		if (!ret) { | 
 | 5060 | 			if (colon) | 
 | 5061 | 				*colon = ':'; | 
 | 5062 | 			if (copy_to_user(arg, &ifr, | 
 | 5063 | 					 sizeof(struct ifreq))) | 
 | 5064 | 				ret = -EFAULT; | 
 | 5065 | 		} | 
 | 5066 | 		return ret; | 
 | 5067 |  | 
 | 5068 | 	/* | 
 | 5069 | 	 *	These ioctl calls: | 
 | 5070 | 	 *	- require superuser power. | 
 | 5071 | 	 *	- require strict serialization. | 
 | 5072 | 	 *	- do not return a value | 
 | 5073 | 	 */ | 
 | 5074 | 	case SIOCSIFFLAGS: | 
 | 5075 | 	case SIOCSIFMETRIC: | 
 | 5076 | 	case SIOCSIFMTU: | 
 | 5077 | 	case SIOCSIFMAP: | 
 | 5078 | 	case SIOCSIFHWADDR: | 
 | 5079 | 	case SIOCSIFSLAVE: | 
 | 5080 | 	case SIOCADDMULTI: | 
 | 5081 | 	case SIOCDELMULTI: | 
 | 5082 | 	case SIOCSIFHWBROADCAST: | 
 | 5083 | 	case SIOCSIFTXQLEN: | 
 | 5084 | 	case SIOCSMIIREG: | 
 | 5085 | 	case SIOCBONDENSLAVE: | 
 | 5086 | 	case SIOCBONDRELEASE: | 
 | 5087 | 	case SIOCBONDSETHWADDR: | 
 | 5088 | 	case SIOCBONDCHANGEACTIVE: | 
 | 5089 | 	case SIOCBRADDIF: | 
 | 5090 | 	case SIOCBRDELIF: | 
 | 5091 | 	case SIOCSHWTSTAMP: | 
 | 5092 | 		if (!capable(CAP_NET_ADMIN)) | 
 | 5093 | 			return -EPERM; | 
 | 5094 | 		/* fall through */ | 
 | 5095 | 	case SIOCBONDSLAVEINFOQUERY: | 
 | 5096 | 	case SIOCBONDINFOQUERY: | 
 | 5097 | 		dev_load(net, ifr.ifr_name); | 
 | 5098 | 		rtnl_lock(); | 
 | 5099 | 		ret = dev_ifsioc(net, &ifr, cmd); | 
 | 5100 | 		rtnl_unlock(); | 
 | 5101 | 		return ret; | 
 | 5102 |  | 
 | 5103 | 	case SIOCGIFMEM: | 
 | 5104 | 		/* Get the per device memory space. We can add this but | 
 | 5105 | 		 * currently do not support it */ | 
 | 5106 | 	case SIOCSIFMEM: | 
 | 5107 | 		/* Set the per device memory buffer space. | 
 | 5108 | 		 * Not applicable in our case */ | 
 | 5109 | 	case SIOCSIFLINK: | 
 | 5110 | 		return -EINVAL; | 
 | 5111 |  | 
 | 5112 | 	/* | 
 | 5113 | 	 *	Unknown or private ioctl. | 
 | 5114 | 	 */ | 
 | 5115 | 	default: | 
 | 5116 | 		if (cmd == SIOCWANDEV || | 
 | 5117 | 		    (cmd >= SIOCDEVPRIVATE && | 
 | 5118 | 		     cmd <= SIOCDEVPRIVATE + 15)) { | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 5119 | 			dev_load(net, ifr.ifr_name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5120 | 			rtnl_lock(); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 5121 | 			ret = dev_ifsioc(net, &ifr, cmd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5122 | 			rtnl_unlock(); | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5123 | 			if (!ret && copy_to_user(arg, &ifr, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5124 | 						 sizeof(struct ifreq))) | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5125 | 				ret = -EFAULT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5126 | 			return ret; | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5127 | 		} | 
 | 5128 | 		/* Take care of Wireless Extensions */ | 
 | 5129 | 		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) | 
 | 5130 | 			return wext_handle_ioctl(net, &ifr, cmd, arg); | 
 | 5131 | 		return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5132 | 	} | 
 | 5133 | } | 
 | 5134 |  | 
 | 5135 |  | 
 | 5136 | /** | 
 | 5137 |  *	dev_new_index	-	allocate an ifindex | 
| Randy Dunlap | c4ea43c | 2007-10-12 21:17:49 -0700 | [diff] [blame] | 5138 |  *	@net: the applicable net namespace | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5139 |  * | 
 | 5140 |  *	Returns a suitable unique value for a new device interface | 
 | 5141 |  *	number.  The caller must hold the rtnl semaphore or the | 
 | 5142 |  *	dev_base_lock to be sure it remains unique. | 
 | 5143 |  */ | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 5144 | static int dev_new_index(struct net *net) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5145 | { | 
 | 5146 | 	static int ifindex; | 
 | 5147 | 	for (;;) { | 
 | 5148 | 		if (++ifindex <= 0) | 
 | 5149 | 			ifindex = 1; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 5150 | 		if (!__dev_get_by_index(net, ifindex)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5151 | 			return ifindex; | 
 | 5152 | 	} | 
 | 5153 | } | 
 | 5154 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5155 | /* Delayed registration/unregisteration */ | 
| Denis Cheng | 3b5b34f | 2007-12-07 00:49:17 -0800 | [diff] [blame] | 5156 | static LIST_HEAD(net_todo_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5157 |  | 
| Stephen Hemminger | 6f05f62 | 2007-03-08 20:46:03 -0800 | [diff] [blame] | 5158 | static void net_set_todo(struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5159 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5160 | 	list_add_tail(&dev->todo_list, &net_todo_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5161 | } | 
 | 5162 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5163 | static void rollback_registered_many(struct list_head *head) | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5164 | { | 
| Krishna Kumar | e93737b | 2009-12-08 22:26:02 +0000 | [diff] [blame] | 5165 | 	struct net_device *dev, *tmp; | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5166 |  | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5167 | 	BUG_ON(dev_boot_phase); | 
 | 5168 | 	ASSERT_RTNL(); | 
 | 5169 |  | 
| Krishna Kumar | e93737b | 2009-12-08 22:26:02 +0000 | [diff] [blame] | 5170 | 	list_for_each_entry_safe(dev, tmp, head, unreg_list) { | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5171 | 		/* Some devices call without registering | 
| Krishna Kumar | e93737b | 2009-12-08 22:26:02 +0000 | [diff] [blame] | 5172 | 		 * for initialization unwind. Remove those | 
 | 5173 | 		 * devices and proceed with the remaining. | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5174 | 		 */ | 
 | 5175 | 		if (dev->reg_state == NETREG_UNINITIALIZED) { | 
 | 5176 | 			pr_debug("unregister_netdevice: device %s/%p never " | 
 | 5177 | 				 "was registered\n", dev->name, dev); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5178 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5179 | 			WARN_ON(1); | 
| Krishna Kumar | e93737b | 2009-12-08 22:26:02 +0000 | [diff] [blame] | 5180 | 			list_del(&dev->unreg_list); | 
 | 5181 | 			continue; | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5182 | 		} | 
 | 5183 |  | 
 | 5184 | 		BUG_ON(dev->reg_state != NETREG_REGISTERED); | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 5185 | 	} | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5186 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 5187 | 	/* If device is running, close it first. */ | 
 | 5188 | 	dev_close_many(head); | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5189 |  | 
| Octavian Purdila | 4434572 | 2010-12-13 12:44:07 +0000 | [diff] [blame] | 5190 | 	list_for_each_entry(dev, head, unreg_list) { | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5191 | 		/* And unlink it from device chain. */ | 
 | 5192 | 		unlist_netdevice(dev); | 
 | 5193 |  | 
 | 5194 | 		dev->reg_state = NETREG_UNREGISTERING; | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5195 | 	} | 
 | 5196 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5197 | 	synchronize_net(); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5198 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5199 | 	list_for_each_entry(dev, head, unreg_list) { | 
 | 5200 | 		/* Shutdown queueing discipline. */ | 
 | 5201 | 		dev_shutdown(dev); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5202 |  | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5203 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5204 | 		/* Notify protocols, that we are about to destroy | 
 | 5205 | 		   this device. They should clean all the things. | 
 | 5206 | 		*/ | 
 | 5207 | 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 
 | 5208 |  | 
| Patrick McHardy | a283576 | 2010-02-26 06:34:51 +0000 | [diff] [blame] | 5209 | 		if (!dev->rtnl_link_ops || | 
 | 5210 | 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED) | 
 | 5211 | 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); | 
 | 5212 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5213 | 		/* | 
 | 5214 | 		 *	Flush the unicast and multicast chains | 
 | 5215 | 		 */ | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 5216 | 		dev_uc_flush(dev); | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 5217 | 		dev_mc_flush(dev); | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5218 |  | 
 | 5219 | 		if (dev->netdev_ops->ndo_uninit) | 
 | 5220 | 			dev->netdev_ops->ndo_uninit(dev); | 
 | 5221 |  | 
 | 5222 | 		/* Notifier chain MUST detach us from master device. */ | 
 | 5223 | 		WARN_ON(dev->master); | 
 | 5224 |  | 
 | 5225 | 		/* Remove entries from kobject tree */ | 
 | 5226 | 		netdev_unregister_kobject(dev); | 
 | 5227 | 	} | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5228 |  | 
| Eric W. Biederman | a5ee155 | 2009-11-29 15:45:58 +0000 | [diff] [blame] | 5229 | 	/* Process any work delayed until the end of the batch */ | 
| stephen hemminger | e5e26d7 | 2010-02-24 14:01:38 +0000 | [diff] [blame] | 5230 | 	dev = list_first_entry(head, struct net_device, unreg_list); | 
| Eric W. Biederman | a5ee155 | 2009-11-29 15:45:58 +0000 | [diff] [blame] | 5231 | 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); | 
 | 5232 |  | 
| Eric Dumazet | ef885af | 2010-09-13 12:24:54 +0000 | [diff] [blame] | 5233 | 	rcu_barrier(); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5234 |  | 
| Eric W. Biederman | a5ee155 | 2009-11-29 15:45:58 +0000 | [diff] [blame] | 5235 | 	list_for_each_entry(dev, head, unreg_list) | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5236 | 		dev_put(dev); | 
 | 5237 | } | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5238 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5239 | static void rollback_registered(struct net_device *dev) | 
 | 5240 | { | 
 | 5241 | 	LIST_HEAD(single); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5242 |  | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5243 | 	list_add(&dev->unreg_list, &single); | 
 | 5244 | 	rollback_registered_many(&single); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5245 | } | 
 | 5246 |  | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5247 | u32 netdev_fix_features(struct net_device *dev, u32 features) | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5248 | { | 
| Michał Mirosław | 57422dc | 2011-01-22 12:14:12 +0000 | [diff] [blame] | 5249 | 	/* Fix illegal checksum combinations */ | 
 | 5250 | 	if ((features & NETIF_F_HW_CSUM) && | 
 | 5251 | 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5252 | 		netdev_info(dev, "mixed HW and IP checksum settings.\n"); | 
| Michał Mirosław | 57422dc | 2011-01-22 12:14:12 +0000 | [diff] [blame] | 5253 | 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); | 
 | 5254 | 	} | 
 | 5255 |  | 
 | 5256 | 	if ((features & NETIF_F_NO_CSUM) && | 
 | 5257 | 	    (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5258 | 		netdev_info(dev, "mixed no checksumming and other settings.\n"); | 
| Michał Mirosław | 57422dc | 2011-01-22 12:14:12 +0000 | [diff] [blame] | 5259 | 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); | 
 | 5260 | 	} | 
 | 5261 |  | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5262 | 	/* Fix illegal SG+CSUM combinations. */ | 
 | 5263 | 	if ((features & NETIF_F_SG) && | 
 | 5264 | 	    !(features & NETIF_F_ALL_CSUM)) { | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5265 | 		netdev_info(dev, | 
 | 5266 | 			    "Dropping NETIF_F_SG since no checksum feature.\n"); | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5267 | 		features &= ~NETIF_F_SG; | 
 | 5268 | 	} | 
 | 5269 |  | 
 | 5270 | 	/* TSO requires that SG is present as well. */ | 
 | 5271 | 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5272 | 		netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5273 | 		features &= ~NETIF_F_TSO; | 
 | 5274 | 	} | 
 | 5275 |  | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5276 | 	/* UFO needs SG and checksumming */ | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5277 | 	if (features & NETIF_F_UFO) { | 
| Michał Mirosław | 7903264 | 2010-11-30 06:38:00 +0000 | [diff] [blame] | 5278 | 		/* maybe split UFO into V4 and V6? */ | 
 | 5279 | 		if (!((features & NETIF_F_GEN_CSUM) || | 
 | 5280 | 		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) | 
 | 5281 | 			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5282 | 			netdev_info(dev, | 
 | 5283 | 				"Dropping NETIF_F_UFO since no checksum offload features.\n"); | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5284 | 			features &= ~NETIF_F_UFO; | 
 | 5285 | 		} | 
 | 5286 |  | 
 | 5287 | 		if (!(features & NETIF_F_SG)) { | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5288 | 			netdev_info(dev, | 
 | 5289 | 				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 5290 | 			features &= ~NETIF_F_UFO; | 
 | 5291 | 		} | 
 | 5292 | 	} | 
 | 5293 |  | 
 | 5294 | 	return features; | 
 | 5295 | } | 
 | 5296 | EXPORT_SYMBOL(netdev_fix_features); | 
 | 5297 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5298 | /** | 
| Patrick Mullaney | fc4a748 | 2009-12-03 15:59:22 -0800 | [diff] [blame] | 5299 |  *	netif_stacked_transfer_operstate -	transfer operstate | 
 | 5300 |  *	@rootdev: the root or lower level device to transfer state from | 
 | 5301 |  *	@dev: the device to transfer operstate to | 
 | 5302 |  * | 
 | 5303 |  *	Transfer operational state from root to device. This is normally | 
 | 5304 |  *	called when a stacking relationship exists between the root | 
 | 5305 |  *	device and the device(a leaf device). | 
 | 5306 |  */ | 
 | 5307 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, | 
 | 5308 | 					struct net_device *dev) | 
 | 5309 | { | 
 | 5310 | 	if (rootdev->operstate == IF_OPER_DORMANT) | 
 | 5311 | 		netif_dormant_on(dev); | 
 | 5312 | 	else | 
 | 5313 | 		netif_dormant_off(dev); | 
 | 5314 |  | 
 | 5315 | 	if (netif_carrier_ok(rootdev)) { | 
 | 5316 | 		if (!netif_carrier_ok(dev)) | 
 | 5317 | 			netif_carrier_on(dev); | 
 | 5318 | 	} else { | 
 | 5319 | 		if (netif_carrier_ok(dev)) | 
 | 5320 | 			netif_carrier_off(dev); | 
 | 5321 | 	} | 
 | 5322 | } | 
 | 5323 | EXPORT_SYMBOL(netif_stacked_transfer_operstate); | 
 | 5324 |  | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 5325 | #ifdef CONFIG_RPS | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5326 | static int netif_alloc_rx_queues(struct net_device *dev) | 
 | 5327 | { | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5328 | 	unsigned int i, count = dev->num_rx_queues; | 
| Tom Herbert | bd25fa7 | 2010-10-18 18:00:16 +0000 | [diff] [blame] | 5329 | 	struct netdev_rx_queue *rx; | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5330 |  | 
| Tom Herbert | bd25fa7 | 2010-10-18 18:00:16 +0000 | [diff] [blame] | 5331 | 	BUG_ON(count < 1); | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5332 |  | 
| Tom Herbert | bd25fa7 | 2010-10-18 18:00:16 +0000 | [diff] [blame] | 5333 | 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); | 
 | 5334 | 	if (!rx) { | 
 | 5335 | 		pr_err("netdev: Unable to allocate %u rx queues.\n", count); | 
 | 5336 | 		return -ENOMEM; | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5337 | 	} | 
| Tom Herbert | bd25fa7 | 2010-10-18 18:00:16 +0000 | [diff] [blame] | 5338 | 	dev->_rx = rx; | 
 | 5339 |  | 
| Tom Herbert | bd25fa7 | 2010-10-18 18:00:16 +0000 | [diff] [blame] | 5340 | 	for (i = 0; i < count; i++) | 
| Tom Herbert | fe82224 | 2010-11-09 10:47:38 +0000 | [diff] [blame] | 5341 | 		rx[i].dev = dev; | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5342 | 	return 0; | 
 | 5343 | } | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 5344 | #endif | 
| Eric Dumazet | 1b4bf46 | 2010-09-23 17:26:35 +0000 | [diff] [blame] | 5345 |  | 
| Changli Gao | aa94210 | 2010-12-04 02:31:41 +0000 | [diff] [blame] | 5346 | static void netdev_init_one_queue(struct net_device *dev, | 
 | 5347 | 				  struct netdev_queue *queue, void *_unused) | 
 | 5348 | { | 
 | 5349 | 	/* Initialize queue lock */ | 
 | 5350 | 	spin_lock_init(&queue->_xmit_lock); | 
 | 5351 | 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); | 
 | 5352 | 	queue->xmit_lock_owner = -1; | 
| Changli Gao | b236da6 | 2010-12-14 03:09:15 +0000 | [diff] [blame] | 5353 | 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE); | 
| Changli Gao | aa94210 | 2010-12-04 02:31:41 +0000 | [diff] [blame] | 5354 | 	queue->dev = dev; | 
 | 5355 | } | 
 | 5356 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 5357 | static int netif_alloc_netdev_queues(struct net_device *dev) | 
 | 5358 | { | 
 | 5359 | 	unsigned int count = dev->num_tx_queues; | 
 | 5360 | 	struct netdev_queue *tx; | 
 | 5361 |  | 
 | 5362 | 	BUG_ON(count < 1); | 
 | 5363 |  | 
 | 5364 | 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); | 
 | 5365 | 	if (!tx) { | 
 | 5366 | 		pr_err("netdev: Unable to allocate %u tx queues.\n", | 
 | 5367 | 		       count); | 
 | 5368 | 		return -ENOMEM; | 
 | 5369 | 	} | 
 | 5370 | 	dev->_tx = tx; | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 5371 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 5372 | 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); | 
 | 5373 | 	spin_lock_init(&dev->tx_global_lock); | 
| Changli Gao | aa94210 | 2010-12-04 02:31:41 +0000 | [diff] [blame] | 5374 |  | 
 | 5375 | 	return 0; | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 5376 | } | 
 | 5377 |  | 
| Patrick Mullaney | fc4a748 | 2009-12-03 15:59:22 -0800 | [diff] [blame] | 5378 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5379 |  *	register_netdevice	- register a network device | 
 | 5380 |  *	@dev: device to register | 
 | 5381 |  * | 
 | 5382 |  *	Take a completed network device structure and add it to the kernel | 
 | 5383 |  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier | 
 | 5384 |  *	chain. 0 is returned on success. A negative errno code is returned | 
 | 5385 |  *	on a failure to set up the device, or if the name is a duplicate. | 
 | 5386 |  * | 
 | 5387 |  *	Callers must hold the rtnl semaphore. You may want | 
 | 5388 |  *	register_netdev() instead of this. | 
 | 5389 |  * | 
 | 5390 |  *	BUGS: | 
 | 5391 |  *	The locking appears insufficient to guarantee two parallel registers | 
 | 5392 |  *	will not get the same name. | 
 | 5393 |  */ | 
 | 5394 |  | 
 | 5395 | int register_netdevice(struct net_device *dev) | 
 | 5396 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5397 | 	int ret; | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 5398 | 	struct net *net = dev_net(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5399 |  | 
 | 5400 | 	BUG_ON(dev_boot_phase); | 
 | 5401 | 	ASSERT_RTNL(); | 
 | 5402 |  | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5403 | 	might_sleep(); | 
 | 5404 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5405 | 	/* When net_device's are persistent, this will be fatal. */ | 
 | 5406 | 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 5407 | 	BUG_ON(!net); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5408 |  | 
| David S. Miller | f1f28aa | 2008-07-15 00:08:33 -0700 | [diff] [blame] | 5409 | 	spin_lock_init(&dev->addr_list_lock); | 
| David S. Miller | cf508b1 | 2008-07-22 14:16:42 -0700 | [diff] [blame] | 5410 | 	netdev_set_addr_lockdep_class(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5411 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5412 | 	dev->iflink = -1; | 
 | 5413 |  | 
 | 5414 | 	/* Init, if this function is available */ | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 5415 | 	if (dev->netdev_ops->ndo_init) { | 
 | 5416 | 		ret = dev->netdev_ops->ndo_init(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5417 | 		if (ret) { | 
 | 5418 | 			if (ret > 0) | 
 | 5419 | 				ret = -EIO; | 
| Adrian Bunk | 90833aa | 2006-11-13 16:02:22 -0800 | [diff] [blame] | 5420 | 			goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5421 | 		} | 
 | 5422 | 	} | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 5423 |  | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 5424 | 	ret = dev_get_valid_name(dev, dev->name, 0); | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 5425 | 	if (ret) | 
| Herbert Xu | 7ce1b0e | 2007-07-30 16:29:40 -0700 | [diff] [blame] | 5426 | 		goto err_uninit; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5427 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 5428 | 	dev->ifindex = dev_new_index(net); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5429 | 	if (dev->iflink == -1) | 
 | 5430 | 		dev->iflink = dev->ifindex; | 
 | 5431 |  | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 5432 | 	dev->features = netdev_fix_features(dev, dev->features); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5433 |  | 
| Lennert Buytenhek | e5a4a72 | 2008-08-03 01:23:10 -0700 | [diff] [blame] | 5434 | 	/* Enable software GSO if SG is supported. */ | 
 | 5435 | 	if (dev->features & NETIF_F_SG) | 
 | 5436 | 		dev->features |= NETIF_F_GSO; | 
 | 5437 |  | 
| Eric Dumazet | c5256c5 | 2010-09-23 00:46:11 +0000 | [diff] [blame] | 5438 | 	/* Enable GRO and NETIF_F_HIGHDMA for vlans by default, | 
 | 5439 | 	 * vlan_dev_init() will do the dev->features check, so these features | 
 | 5440 | 	 * are enabled only if supported by underlying device. | 
| Brandon Philips | 16c3ea7 | 2010-09-15 09:24:24 +0000 | [diff] [blame] | 5441 | 	 */ | 
| Eric Dumazet | c5256c5 | 2010-09-23 00:46:11 +0000 | [diff] [blame] | 5442 | 	dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA); | 
| Brandon Philips | 16c3ea7 | 2010-09-15 09:24:24 +0000 | [diff] [blame] | 5443 |  | 
| Johannes Berg | 7ffbe3f | 2009-10-02 05:15:27 +0000 | [diff] [blame] | 5444 | 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); | 
 | 5445 | 	ret = notifier_to_errno(ret); | 
 | 5446 | 	if (ret) | 
 | 5447 | 		goto err_uninit; | 
 | 5448 |  | 
| Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 5449 | 	ret = netdev_register_kobject(dev); | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5450 | 	if (ret) | 
| Herbert Xu | 7ce1b0e | 2007-07-30 16:29:40 -0700 | [diff] [blame] | 5451 | 		goto err_uninit; | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5452 | 	dev->reg_state = NETREG_REGISTERED; | 
 | 5453 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5454 | 	/* | 
 | 5455 | 	 *	Default initial state at registry is that the | 
 | 5456 | 	 *	device is present. | 
 | 5457 | 	 */ | 
 | 5458 |  | 
 | 5459 | 	set_bit(__LINK_STATE_PRESENT, &dev->state); | 
 | 5460 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5461 | 	dev_init_scheduler(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5462 | 	dev_hold(dev); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 5463 | 	list_netdevice(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5464 |  | 
 | 5465 | 	/* Notify protocols, that a new device appeared. */ | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 5466 | 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); | 
| Herbert Xu | fcc5a03 | 2007-07-30 17:03:38 -0700 | [diff] [blame] | 5467 | 	ret = notifier_to_errno(ret); | 
| Daniel Lezcano | 93ee31f | 2007-10-30 15:38:18 -0700 | [diff] [blame] | 5468 | 	if (ret) { | 
 | 5469 | 		rollback_registered(dev); | 
 | 5470 | 		dev->reg_state = NETREG_UNREGISTERED; | 
 | 5471 | 	} | 
| Eric W. Biederman | d90a909 | 2009-12-12 22:11:15 +0000 | [diff] [blame] | 5472 | 	/* | 
 | 5473 | 	 *	Prevent userspace races by waiting until the network | 
 | 5474 | 	 *	device is fully setup before sending notifications. | 
 | 5475 | 	 */ | 
| Patrick McHardy | a283576 | 2010-02-26 06:34:51 +0000 | [diff] [blame] | 5476 | 	if (!dev->rtnl_link_ops || | 
 | 5477 | 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED) | 
 | 5478 | 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5479 |  | 
 | 5480 | out: | 
 | 5481 | 	return ret; | 
| Herbert Xu | 7ce1b0e | 2007-07-30 16:29:40 -0700 | [diff] [blame] | 5482 |  | 
 | 5483 | err_uninit: | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 5484 | 	if (dev->netdev_ops->ndo_uninit) | 
 | 5485 | 		dev->netdev_ops->ndo_uninit(dev); | 
| Herbert Xu | 7ce1b0e | 2007-07-30 16:29:40 -0700 | [diff] [blame] | 5486 | 	goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5487 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5488 | EXPORT_SYMBOL(register_netdevice); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5489 |  | 
 | 5490 | /** | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 5491 |  *	init_dummy_netdev	- init a dummy network device for NAPI | 
 | 5492 |  *	@dev: device to init | 
 | 5493 |  * | 
 | 5494 |  *	This takes a network device structure and initialize the minimum | 
 | 5495 |  *	amount of fields so it can be used to schedule NAPI polls without | 
 | 5496 |  *	registering a full blown interface. This is to be used by drivers | 
 | 5497 |  *	that need to tie several hardware interfaces to a single NAPI | 
 | 5498 |  *	poll scheduler due to HW limitations. | 
 | 5499 |  */ | 
 | 5500 | int init_dummy_netdev(struct net_device *dev) | 
 | 5501 | { | 
 | 5502 | 	/* Clear everything. Note we don't initialize spinlocks | 
 | 5503 | 	 * are they aren't supposed to be taken by any of the | 
 | 5504 | 	 * NAPI code and this dummy netdev is supposed to be | 
 | 5505 | 	 * only ever used for NAPI polls | 
 | 5506 | 	 */ | 
 | 5507 | 	memset(dev, 0, sizeof(struct net_device)); | 
 | 5508 |  | 
 | 5509 | 	/* make sure we BUG if trying to hit standard | 
 | 5510 | 	 * register/unregister code path | 
 | 5511 | 	 */ | 
 | 5512 | 	dev->reg_state = NETREG_DUMMY; | 
 | 5513 |  | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 5514 | 	/* NAPI wants this */ | 
 | 5515 | 	INIT_LIST_HEAD(&dev->napi_list); | 
 | 5516 |  | 
 | 5517 | 	/* a dummy interface is started by default */ | 
 | 5518 | 	set_bit(__LINK_STATE_PRESENT, &dev->state); | 
 | 5519 | 	set_bit(__LINK_STATE_START, &dev->state); | 
 | 5520 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5521 | 	/* Note : We dont allocate pcpu_refcnt for dummy devices, | 
 | 5522 | 	 * because users of this 'device' dont need to change | 
 | 5523 | 	 * its refcount. | 
 | 5524 | 	 */ | 
 | 5525 |  | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 5526 | 	return 0; | 
 | 5527 | } | 
 | 5528 | EXPORT_SYMBOL_GPL(init_dummy_netdev); | 
 | 5529 |  | 
 | 5530 |  | 
 | 5531 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5532 |  *	register_netdev	- register a network device | 
 | 5533 |  *	@dev: device to register | 
 | 5534 |  * | 
 | 5535 |  *	Take a completed network device structure and add it to the kernel | 
 | 5536 |  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier | 
 | 5537 |  *	chain. 0 is returned on success. A negative errno code is returned | 
 | 5538 |  *	on a failure to set up the device, or if the name is a duplicate. | 
 | 5539 |  * | 
| Borislav Petkov | 38b4da3 | 2007-04-20 22:14:10 -0700 | [diff] [blame] | 5540 |  *	This is a wrapper around register_netdevice that takes the rtnl semaphore | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5541 |  *	and expands the device name if you passed a format string to | 
 | 5542 |  *	alloc_netdev. | 
 | 5543 |  */ | 
 | 5544 | int register_netdev(struct net_device *dev) | 
 | 5545 | { | 
 | 5546 | 	int err; | 
 | 5547 |  | 
 | 5548 | 	rtnl_lock(); | 
 | 5549 |  | 
 | 5550 | 	/* | 
 | 5551 | 	 * If the name is a format string the caller wants us to do a | 
 | 5552 | 	 * name allocation. | 
 | 5553 | 	 */ | 
 | 5554 | 	if (strchr(dev->name, '%')) { | 
 | 5555 | 		err = dev_alloc_name(dev, dev->name); | 
 | 5556 | 		if (err < 0) | 
 | 5557 | 			goto out; | 
 | 5558 | 	} | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 5559 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5560 | 	err = register_netdevice(dev); | 
 | 5561 | out: | 
 | 5562 | 	rtnl_unlock(); | 
 | 5563 | 	return err; | 
 | 5564 | } | 
 | 5565 | EXPORT_SYMBOL(register_netdev); | 
 | 5566 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5567 | int netdev_refcnt_read(const struct net_device *dev) | 
 | 5568 | { | 
 | 5569 | 	int i, refcnt = 0; | 
 | 5570 |  | 
 | 5571 | 	for_each_possible_cpu(i) | 
 | 5572 | 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); | 
 | 5573 | 	return refcnt; | 
 | 5574 | } | 
 | 5575 | EXPORT_SYMBOL(netdev_refcnt_read); | 
 | 5576 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5577 | /* | 
 | 5578 |  * netdev_wait_allrefs - wait until all references are gone. | 
 | 5579 |  * | 
 | 5580 |  * This is called when unregistering network devices. | 
 | 5581 |  * | 
 | 5582 |  * Any protocol or device that holds a reference should register | 
 | 5583 |  * for netdevice notification, and cleanup and put back the | 
 | 5584 |  * reference if they receive an UNREGISTER event. | 
 | 5585 |  * We can get stuck here if buggy protocols don't correctly | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 5586 |  * call dev_put. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5587 |  */ | 
 | 5588 | static void netdev_wait_allrefs(struct net_device *dev) | 
 | 5589 | { | 
 | 5590 | 	unsigned long rebroadcast_time, warning_time; | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5591 | 	int refcnt; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5592 |  | 
| Eric Dumazet | e014deb | 2009-11-17 05:59:21 +0000 | [diff] [blame] | 5593 | 	linkwatch_forget_dev(dev); | 
 | 5594 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5595 | 	rebroadcast_time = warning_time = jiffies; | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5596 | 	refcnt = netdev_refcnt_read(dev); | 
 | 5597 |  | 
 | 5598 | 	while (refcnt != 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5599 | 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { | 
| Stephen Hemminger | 6756ae4 | 2006-03-20 22:23:58 -0800 | [diff] [blame] | 5600 | 			rtnl_lock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5601 |  | 
 | 5602 | 			/* Rebroadcast unregister notification */ | 
| Pavel Emelyanov | 056925a | 2007-09-16 15:42:43 -0700 | [diff] [blame] | 5603 | 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 
| Eric W. Biederman | a5ee155 | 2009-11-29 15:45:58 +0000 | [diff] [blame] | 5604 | 			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users | 
| Octavian Purdila | 395264d | 2009-11-16 13:49:35 +0000 | [diff] [blame] | 5605 | 			 * should have already handle it the first time */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5606 |  | 
 | 5607 | 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING, | 
 | 5608 | 				     &dev->state)) { | 
 | 5609 | 				/* We must not have linkwatch events | 
 | 5610 | 				 * pending on unregister. If this | 
 | 5611 | 				 * happens, we simply run the queue | 
 | 5612 | 				 * unscheduled, resulting in a noop | 
 | 5613 | 				 * for this device. | 
 | 5614 | 				 */ | 
 | 5615 | 				linkwatch_run_queue(); | 
 | 5616 | 			} | 
 | 5617 |  | 
| Stephen Hemminger | 6756ae4 | 2006-03-20 22:23:58 -0800 | [diff] [blame] | 5618 | 			__rtnl_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5619 |  | 
 | 5620 | 			rebroadcast_time = jiffies; | 
 | 5621 | 		} | 
 | 5622 |  | 
 | 5623 | 		msleep(250); | 
 | 5624 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5625 | 		refcnt = netdev_refcnt_read(dev); | 
 | 5626 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5627 | 		if (time_after(jiffies, warning_time + 10 * HZ)) { | 
 | 5628 | 			printk(KERN_EMERG "unregister_netdevice: " | 
 | 5629 | 			       "waiting for %s to become free. Usage " | 
 | 5630 | 			       "count = %d\n", | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5631 | 			       dev->name, refcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5632 | 			warning_time = jiffies; | 
 | 5633 | 		} | 
 | 5634 | 	} | 
 | 5635 | } | 
 | 5636 |  | 
 | 5637 | /* The sequence is: | 
 | 5638 |  * | 
 | 5639 |  *	rtnl_lock(); | 
 | 5640 |  *	... | 
 | 5641 |  *	register_netdevice(x1); | 
 | 5642 |  *	register_netdevice(x2); | 
 | 5643 |  *	... | 
 | 5644 |  *	unregister_netdevice(y1); | 
 | 5645 |  *	unregister_netdevice(y2); | 
 | 5646 |  *      ... | 
 | 5647 |  *	rtnl_unlock(); | 
 | 5648 |  *	free_netdev(y1); | 
 | 5649 |  *	free_netdev(y2); | 
 | 5650 |  * | 
| Herbert Xu | 58ec3b4 | 2008-10-07 15:50:03 -0700 | [diff] [blame] | 5651 |  * We are invoked by rtnl_unlock(). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5652 |  * This allows us to deal with problems: | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5653 |  * 1) We can delete sysfs objects which invoke hotplug | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5654 |  *    without deadlocking with linkwatch via keventd. | 
 | 5655 |  * 2) Since we run with the RTNL semaphore not held, we can sleep | 
 | 5656 |  *    safely in order to wait for the netdev refcnt to drop to zero. | 
| Herbert Xu | 58ec3b4 | 2008-10-07 15:50:03 -0700 | [diff] [blame] | 5657 |  * | 
 | 5658 |  * We must not return until all unregister events added during | 
 | 5659 |  * the interval the lock was held have been completed. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5660 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5661 | void netdev_run_todo(void) | 
 | 5662 | { | 
| Oleg Nesterov | 626ab0e | 2006-06-23 02:05:55 -0700 | [diff] [blame] | 5663 | 	struct list_head list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5664 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5665 | 	/* Snapshot list, allow later requests */ | 
| Oleg Nesterov | 626ab0e | 2006-06-23 02:05:55 -0700 | [diff] [blame] | 5666 | 	list_replace_init(&net_todo_list, &list); | 
| Herbert Xu | 58ec3b4 | 2008-10-07 15:50:03 -0700 | [diff] [blame] | 5667 |  | 
 | 5668 | 	__rtnl_unlock(); | 
| Oleg Nesterov | 626ab0e | 2006-06-23 02:05:55 -0700 | [diff] [blame] | 5669 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5670 | 	while (!list_empty(&list)) { | 
 | 5671 | 		struct net_device *dev | 
| stephen hemminger | e5e26d7 | 2010-02-24 14:01:38 +0000 | [diff] [blame] | 5672 | 			= list_first_entry(&list, struct net_device, todo_list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5673 | 		list_del(&dev->todo_list); | 
 | 5674 |  | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5675 | 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5676 | 			printk(KERN_ERR "network todo '%s' but state %d\n", | 
 | 5677 | 			       dev->name, dev->reg_state); | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5678 | 			dump_stack(); | 
 | 5679 | 			continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5680 | 		} | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5681 |  | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5682 | 		dev->reg_state = NETREG_UNREGISTERED; | 
 | 5683 |  | 
| Changli Gao | 152102c | 2010-03-30 20:16:22 +0000 | [diff] [blame] | 5684 | 		on_each_cpu(flush_backlog, dev, 1); | 
| Stephen Hemminger | 6e583ce | 2008-08-03 21:29:57 -0700 | [diff] [blame] | 5685 |  | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5686 | 		netdev_wait_allrefs(dev); | 
 | 5687 |  | 
 | 5688 | 		/* paranoia */ | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5689 | 		BUG_ON(netdev_refcnt_read(dev)); | 
| Eric Dumazet | 95ae6b2 | 2010-09-15 04:04:31 +0000 | [diff] [blame] | 5690 | 		WARN_ON(rcu_dereference_raw(dev->ip_ptr)); | 
| Eric Dumazet | 198caec | 2010-10-24 21:32:05 +0000 | [diff] [blame] | 5691 | 		WARN_ON(rcu_dereference_raw(dev->ip6_ptr)); | 
| Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 5692 | 		WARN_ON(dev->dn_ptr); | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5693 |  | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 5694 | 		if (dev->destructor) | 
 | 5695 | 			dev->destructor(dev); | 
| Stephen Hemminger | 9093bbb | 2007-05-19 15:39:25 -0700 | [diff] [blame] | 5696 |  | 
 | 5697 | 		/* Free network device */ | 
 | 5698 | 		kobject_put(&dev->dev.kobj); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5699 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5700 | } | 
 | 5701 |  | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 5702 | /* Convert net_device_stats to rtnl_link_stats64.  They have the same | 
 | 5703 |  * fields in the same order, with only the type differing. | 
 | 5704 |  */ | 
 | 5705 | static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, | 
 | 5706 | 				    const struct net_device_stats *netdev_stats) | 
 | 5707 | { | 
 | 5708 | #if BITS_PER_LONG == 64 | 
 | 5709 |         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats)); | 
 | 5710 |         memcpy(stats64, netdev_stats, sizeof(*stats64)); | 
 | 5711 | #else | 
 | 5712 | 	size_t i, n = sizeof(*stats64) / sizeof(u64); | 
 | 5713 | 	const unsigned long *src = (const unsigned long *)netdev_stats; | 
 | 5714 | 	u64 *dst = (u64 *)stats64; | 
 | 5715 |  | 
 | 5716 | 	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) != | 
 | 5717 | 		     sizeof(*stats64) / sizeof(u64)); | 
 | 5718 | 	for (i = 0; i < n; i++) | 
 | 5719 | 		dst[i] = src[i]; | 
 | 5720 | #endif | 
 | 5721 | } | 
 | 5722 |  | 
| Eric Dumazet | d83345a | 2009-11-16 03:36:51 +0000 | [diff] [blame] | 5723 | /** | 
| Stephen Hemminger | eeda3fd | 2008-11-19 21:40:23 -0800 | [diff] [blame] | 5724 |  *	dev_get_stats	- get network device statistics | 
 | 5725 |  *	@dev: device to get statistics from | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 5726 |  *	@storage: place to store stats | 
| Stephen Hemminger | eeda3fd | 2008-11-19 21:40:23 -0800 | [diff] [blame] | 5727 |  * | 
| Ben Hutchings | d775351 | 2010-07-09 09:12:41 +0000 | [diff] [blame] | 5728 |  *	Get network statistics from device. Return @storage. | 
 | 5729 |  *	The device driver may provide its own method by setting | 
 | 5730 |  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats; | 
 | 5731 |  *	otherwise the internal statistics structure is used. | 
| Stephen Hemminger | eeda3fd | 2008-11-19 21:40:23 -0800 | [diff] [blame] | 5732 |  */ | 
| Ben Hutchings | d775351 | 2010-07-09 09:12:41 +0000 | [diff] [blame] | 5733 | struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | 
 | 5734 | 					struct rtnl_link_stats64 *storage) | 
| Eric Dumazet | 7004bf2 | 2009-05-18 00:34:33 +0000 | [diff] [blame] | 5735 | { | 
| Stephen Hemminger | eeda3fd | 2008-11-19 21:40:23 -0800 | [diff] [blame] | 5736 | 	const struct net_device_ops *ops = dev->netdev_ops; | 
 | 5737 |  | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 5738 | 	if (ops->ndo_get_stats64) { | 
 | 5739 | 		memset(storage, 0, sizeof(*storage)); | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 5740 | 		ops->ndo_get_stats64(dev, storage); | 
 | 5741 | 	} else if (ops->ndo_get_stats) { | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 5742 | 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 5743 | 	} else { | 
 | 5744 | 		netdev_stats_to_stats64(storage, &dev->stats); | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 5745 | 	} | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 5746 | 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped); | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 5747 | 	return storage; | 
| Rusty Russell | c45d286 | 2007-03-28 14:29:08 -0700 | [diff] [blame] | 5748 | } | 
| Stephen Hemminger | eeda3fd | 2008-11-19 21:40:23 -0800 | [diff] [blame] | 5749 | EXPORT_SYMBOL(dev_get_stats); | 
| Rusty Russell | c45d286 | 2007-03-28 14:29:08 -0700 | [diff] [blame] | 5750 |  | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 5751 | struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) | 
| David S. Miller | dc2b484 | 2008-07-08 17:18:23 -0700 | [diff] [blame] | 5752 | { | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 5753 | 	struct netdev_queue *queue = dev_ingress_queue(dev); | 
| David S. Miller | dc2b484 | 2008-07-08 17:18:23 -0700 | [diff] [blame] | 5754 |  | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 5755 | #ifdef CONFIG_NET_CLS_ACT | 
 | 5756 | 	if (queue) | 
 | 5757 | 		return queue; | 
 | 5758 | 	queue = kzalloc(sizeof(*queue), GFP_KERNEL); | 
 | 5759 | 	if (!queue) | 
 | 5760 | 		return NULL; | 
 | 5761 | 	netdev_init_one_queue(dev, queue, NULL); | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 5762 | 	queue->qdisc = &noop_qdisc; | 
 | 5763 | 	queue->qdisc_sleeping = &noop_qdisc; | 
 | 5764 | 	rcu_assign_pointer(dev->ingress_queue, queue); | 
 | 5765 | #endif | 
 | 5766 | 	return queue; | 
| David S. Miller | bb949fb | 2008-07-08 16:55:56 -0700 | [diff] [blame] | 5767 | } | 
 | 5768 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5769 | /** | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5770 |  *	alloc_netdev_mqs - allocate network device | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5771 |  *	@sizeof_priv:	size of private data to allocate space for | 
 | 5772 |  *	@name:		device name format string | 
 | 5773 |  *	@setup:		callback to initialize device | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5774 |  *	@txqs:		the number of TX subqueues to allocate | 
 | 5775 |  *	@rxqs:		the number of RX subqueues to allocate | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5776 |  * | 
 | 5777 |  *	Allocates a struct net_device with private data area for driver use | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 5778 |  *	and performs basic initialization.  Also allocates subquue structs | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5779 |  *	for each queue on the device. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5780 |  */ | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5781 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, | 
 | 5782 | 		void (*setup)(struct net_device *), | 
 | 5783 | 		unsigned int txqs, unsigned int rxqs) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5784 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5785 | 	struct net_device *dev; | 
| Stephen Hemminger | 7943986 | 2008-07-21 13:28:44 -0700 | [diff] [blame] | 5786 | 	size_t alloc_size; | 
| Eric Dumazet | 1ce8e7b | 2009-05-27 04:42:37 +0000 | [diff] [blame] | 5787 | 	struct net_device *p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5788 |  | 
| Stephen Hemminger | b6fe17d | 2006-08-29 17:06:13 -0700 | [diff] [blame] | 5789 | 	BUG_ON(strlen(name) >= sizeof(dev->name)); | 
 | 5790 |  | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5791 | 	if (txqs < 1) { | 
| Tom Herbert | 55513fb | 2010-10-18 17:55:58 +0000 | [diff] [blame] | 5792 | 		pr_err("alloc_netdev: Unable to allocate device " | 
 | 5793 | 		       "with zero queues.\n"); | 
 | 5794 | 		return NULL; | 
 | 5795 | 	} | 
 | 5796 |  | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5797 | #ifdef CONFIG_RPS | 
 | 5798 | 	if (rxqs < 1) { | 
 | 5799 | 		pr_err("alloc_netdev: Unable to allocate device " | 
 | 5800 | 		       "with zero RX queues.\n"); | 
 | 5801 | 		return NULL; | 
 | 5802 | 	} | 
 | 5803 | #endif | 
 | 5804 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 5805 | 	alloc_size = sizeof(struct net_device); | 
| Alexey Dobriyan | d1643d2 | 2008-04-18 15:43:32 -0700 | [diff] [blame] | 5806 | 	if (sizeof_priv) { | 
 | 5807 | 		/* ensure 32-byte alignment of private area */ | 
| Eric Dumazet | 1ce8e7b | 2009-05-27 04:42:37 +0000 | [diff] [blame] | 5808 | 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); | 
| Alexey Dobriyan | d1643d2 | 2008-04-18 15:43:32 -0700 | [diff] [blame] | 5809 | 		alloc_size += sizeof_priv; | 
 | 5810 | 	} | 
 | 5811 | 	/* ensure 32-byte alignment of whole construct */ | 
| Eric Dumazet | 1ce8e7b | 2009-05-27 04:42:37 +0000 | [diff] [blame] | 5812 | 	alloc_size += NETDEV_ALIGN - 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5813 |  | 
| Paolo 'Blaisorblade' Giarrusso | 31380de | 2006-04-06 22:38:28 -0700 | [diff] [blame] | 5814 | 	p = kzalloc(alloc_size, GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5815 | 	if (!p) { | 
| Stephen Hemminger | b6fe17d | 2006-08-29 17:06:13 -0700 | [diff] [blame] | 5816 | 		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5817 | 		return NULL; | 
 | 5818 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5819 |  | 
| Eric Dumazet | 1ce8e7b | 2009-05-27 04:42:37 +0000 | [diff] [blame] | 5820 | 	dev = PTR_ALIGN(p, NETDEV_ALIGN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5821 | 	dev->padded = (char *)dev - (char *)p; | 
| Jiri Pirko | ab9c73c | 2009-05-08 13:30:17 +0000 | [diff] [blame] | 5822 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5823 | 	dev->pcpu_refcnt = alloc_percpu(int); | 
 | 5824 | 	if (!dev->pcpu_refcnt) | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 5825 | 		goto free_p; | 
| Jiri Pirko | ab9c73c | 2009-05-08 13:30:17 +0000 | [diff] [blame] | 5826 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5827 | 	if (dev_addr_init(dev)) | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5828 | 		goto free_pcpu; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5829 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 5830 | 	dev_mc_init(dev); | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 5831 | 	dev_uc_init(dev); | 
| Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 5832 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 5833 | 	dev_net_set(dev, &init_net); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5834 |  | 
| Peter P Waskiewicz Jr | 82cc1a7 | 2008-03-21 03:43:19 -0700 | [diff] [blame] | 5835 | 	dev->gso_max_size = GSO_MAX_SIZE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5836 |  | 
| Peter P Waskiewicz Jr | 15682bc | 2010-02-10 20:03:05 -0800 | [diff] [blame] | 5837 | 	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); | 
 | 5838 | 	dev->ethtool_ntuple_list.count = 0; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 5839 | 	INIT_LIST_HEAD(&dev->napi_list); | 
| Eric W. Biederman | 9fdce09 | 2009-10-30 14:51:13 +0000 | [diff] [blame] | 5840 | 	INIT_LIST_HEAD(&dev->unreg_list); | 
| Eric Dumazet | e014deb | 2009-11-17 05:59:21 +0000 | [diff] [blame] | 5841 | 	INIT_LIST_HEAD(&dev->link_watch_list); | 
| Eric Dumazet | 93f154b | 2009-05-18 22:19:19 -0700 | [diff] [blame] | 5842 | 	dev->priv_flags = IFF_XMIT_DST_RELEASE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5843 | 	setup(dev); | 
| David S. Miller | 8d3bdbd | 2011-02-08 15:02:50 -0800 | [diff] [blame] | 5844 |  | 
 | 5845 | 	dev->num_tx_queues = txqs; | 
 | 5846 | 	dev->real_num_tx_queues = txqs; | 
 | 5847 | 	if (netif_alloc_netdev_queues(dev)) | 
 | 5848 | 		goto free_all; | 
 | 5849 |  | 
 | 5850 | #ifdef CONFIG_RPS | 
 | 5851 | 	dev->num_rx_queues = rxqs; | 
 | 5852 | 	dev->real_num_rx_queues = rxqs; | 
 | 5853 | 	if (netif_alloc_rx_queues(dev)) | 
 | 5854 | 		goto free_all; | 
 | 5855 | #endif | 
 | 5856 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5857 | 	strcpy(dev->name, name); | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 5858 | 	dev->group = INIT_NETDEV_GROUP; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5859 | 	return dev; | 
| Jiri Pirko | ab9c73c | 2009-05-08 13:30:17 +0000 | [diff] [blame] | 5860 |  | 
| David S. Miller | 8d3bdbd | 2011-02-08 15:02:50 -0800 | [diff] [blame] | 5861 | free_all: | 
 | 5862 | 	free_netdev(dev); | 
 | 5863 | 	return NULL; | 
 | 5864 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5865 | free_pcpu: | 
 | 5866 | 	free_percpu(dev->pcpu_refcnt); | 
| Tom Herbert | ed9af2e | 2010-11-09 10:47:30 +0000 | [diff] [blame] | 5867 | 	kfree(dev->_tx); | 
| Tom Herbert | fe82224 | 2010-11-09 10:47:38 +0000 | [diff] [blame] | 5868 | #ifdef CONFIG_RPS | 
 | 5869 | 	kfree(dev->_rx); | 
 | 5870 | #endif | 
 | 5871 |  | 
| Jiri Pirko | ab9c73c | 2009-05-08 13:30:17 +0000 | [diff] [blame] | 5872 | free_p: | 
 | 5873 | 	kfree(p); | 
 | 5874 | 	return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5875 | } | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 5876 | EXPORT_SYMBOL(alloc_netdev_mqs); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5877 |  | 
 | 5878 | /** | 
 | 5879 |  *	free_netdev - free network device | 
 | 5880 |  *	@dev: device | 
 | 5881 |  * | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 5882 |  *	This function does the last stage of destroying an allocated device | 
 | 5883 |  * 	interface. The reference to the device object is released. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5884 |  *	If this is the last reference then it will be freed. | 
 | 5885 |  */ | 
 | 5886 | void free_netdev(struct net_device *dev) | 
 | 5887 | { | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 5888 | 	struct napi_struct *p, *n; | 
 | 5889 |  | 
| Denis V. Lunev | f3005d7 | 2008-04-16 02:02:18 -0700 | [diff] [blame] | 5890 | 	release_net(dev_net(dev)); | 
 | 5891 |  | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 5892 | 	kfree(dev->_tx); | 
| Tom Herbert | fe82224 | 2010-11-09 10:47:38 +0000 | [diff] [blame] | 5893 | #ifdef CONFIG_RPS | 
 | 5894 | 	kfree(dev->_rx); | 
 | 5895 | #endif | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 5896 |  | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 5897 | 	kfree(rcu_dereference_raw(dev->ingress_queue)); | 
 | 5898 |  | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 5899 | 	/* Flush device addresses */ | 
 | 5900 | 	dev_addr_flush(dev); | 
 | 5901 |  | 
| Peter P Waskiewicz Jr | 15682bc | 2010-02-10 20:03:05 -0800 | [diff] [blame] | 5902 | 	/* Clear ethtool n-tuple list */ | 
 | 5903 | 	ethtool_ntuple_flush(dev); | 
 | 5904 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 5905 | 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) | 
 | 5906 | 		netif_napi_del(p); | 
 | 5907 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 5908 | 	free_percpu(dev->pcpu_refcnt); | 
 | 5909 | 	dev->pcpu_refcnt = NULL; | 
 | 5910 |  | 
| Stephen Hemminger | 3041a06 | 2006-05-26 13:25:24 -0700 | [diff] [blame] | 5911 | 	/*  Compatibility with error handling in drivers */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5912 | 	if (dev->reg_state == NETREG_UNINITIALIZED) { | 
 | 5913 | 		kfree((char *)dev - dev->padded); | 
 | 5914 | 		return; | 
 | 5915 | 	} | 
 | 5916 |  | 
 | 5917 | 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED); | 
 | 5918 | 	dev->reg_state = NETREG_RELEASED; | 
 | 5919 |  | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 5920 | 	/* will free via device release */ | 
 | 5921 | 	put_device(&dev->dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5922 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5923 | EXPORT_SYMBOL(free_netdev); | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 5924 |  | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 5925 | /** | 
 | 5926 |  *	synchronize_net -  Synchronize with packet receive processing | 
 | 5927 |  * | 
 | 5928 |  *	Wait for packets currently being received to be done. | 
 | 5929 |  *	Does not block later packets from starting. | 
 | 5930 |  */ | 
| YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 5931 | void synchronize_net(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5932 | { | 
 | 5933 | 	might_sleep(); | 
| Paul E. McKenney | fbd568a3e | 2005-05-01 08:59:04 -0700 | [diff] [blame] | 5934 | 	synchronize_rcu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5935 | } | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 5936 | EXPORT_SYMBOL(synchronize_net); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5937 |  | 
 | 5938 | /** | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5939 |  *	unregister_netdevice_queue - remove device from the kernel | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5940 |  *	@dev: device | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5941 |  *	@head: list | 
| Jaswinder Singh Rajput | 6ebfbc0 | 2009-11-22 20:43:13 -0800 | [diff] [blame] | 5942 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5943 |  *	This function shuts down a device interface and removes it | 
| Wang Chen | d59b54b | 2007-12-11 02:28:03 -0800 | [diff] [blame] | 5944 |  *	from the kernel tables. | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5945 |  *	If head not NULL, device is queued to be unregistered later. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5946 |  * | 
 | 5947 |  *	Callers must hold the rtnl semaphore.  You may want | 
 | 5948 |  *	unregister_netdev() instead of this. | 
 | 5949 |  */ | 
 | 5950 |  | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5951 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5952 | { | 
| Herbert Xu | a662071 | 2007-12-12 19:21:56 -0800 | [diff] [blame] | 5953 | 	ASSERT_RTNL(); | 
 | 5954 |  | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5955 | 	if (head) { | 
| Eric W. Biederman | 9fdce09 | 2009-10-30 14:51:13 +0000 | [diff] [blame] | 5956 | 		list_move_tail(&dev->unreg_list, head); | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5957 | 	} else { | 
 | 5958 | 		rollback_registered(dev); | 
 | 5959 | 		/* Finish processing unregister after unlock */ | 
 | 5960 | 		net_set_todo(dev); | 
 | 5961 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5962 | } | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 5963 | EXPORT_SYMBOL(unregister_netdevice_queue); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5964 |  | 
 | 5965 | /** | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5966 |  *	unregister_netdevice_many - unregister many devices | 
 | 5967 |  *	@head: list of devices | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5968 |  */ | 
 | 5969 | void unregister_netdevice_many(struct list_head *head) | 
 | 5970 | { | 
 | 5971 | 	struct net_device *dev; | 
 | 5972 |  | 
 | 5973 | 	if (!list_empty(head)) { | 
 | 5974 | 		rollback_registered_many(head); | 
 | 5975 | 		list_for_each_entry(dev, head, unreg_list) | 
 | 5976 | 			net_set_todo(dev); | 
 | 5977 | 	} | 
 | 5978 | } | 
| Eric Dumazet | 63c8099 | 2009-10-27 07:06:49 +0000 | [diff] [blame] | 5979 | EXPORT_SYMBOL(unregister_netdevice_many); | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 5980 |  | 
 | 5981 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5982 |  *	unregister_netdev - remove device from the kernel | 
 | 5983 |  *	@dev: device | 
 | 5984 |  * | 
 | 5985 |  *	This function shuts down a device interface and removes it | 
| Wang Chen | d59b54b | 2007-12-11 02:28:03 -0800 | [diff] [blame] | 5986 |  *	from the kernel tables. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5987 |  * | 
 | 5988 |  *	This is just a wrapper for unregister_netdevice that takes | 
 | 5989 |  *	the rtnl semaphore.  In general you want to use this and not | 
 | 5990 |  *	unregister_netdevice. | 
 | 5991 |  */ | 
 | 5992 | void unregister_netdev(struct net_device *dev) | 
 | 5993 | { | 
 | 5994 | 	rtnl_lock(); | 
 | 5995 | 	unregister_netdevice(dev); | 
 | 5996 | 	rtnl_unlock(); | 
 | 5997 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5998 | EXPORT_SYMBOL(unregister_netdev); | 
 | 5999 |  | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6000 | /** | 
 | 6001 |  *	dev_change_net_namespace - move device to different nethost namespace | 
 | 6002 |  *	@dev: device | 
 | 6003 |  *	@net: network namespace | 
 | 6004 |  *	@pat: If not NULL name pattern to try if the current device name | 
 | 6005 |  *	      is already taken in the destination network namespace. | 
 | 6006 |  * | 
 | 6007 |  *	This function shuts down a device interface and moves it | 
 | 6008 |  *	to a new network namespace. On success 0 is returned, on | 
 | 6009 |  *	a failure a netagive errno code is returned. | 
 | 6010 |  * | 
 | 6011 |  *	Callers must hold the rtnl semaphore. | 
 | 6012 |  */ | 
 | 6013 |  | 
 | 6014 | int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) | 
 | 6015 | { | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6016 | 	int err; | 
 | 6017 |  | 
 | 6018 | 	ASSERT_RTNL(); | 
 | 6019 |  | 
 | 6020 | 	/* Don't allow namespace local devices to be moved. */ | 
 | 6021 | 	err = -EINVAL; | 
 | 6022 | 	if (dev->features & NETIF_F_NETNS_LOCAL) | 
 | 6023 | 		goto out; | 
 | 6024 |  | 
 | 6025 | 	/* Ensure the device has been registrered */ | 
 | 6026 | 	err = -EINVAL; | 
 | 6027 | 	if (dev->reg_state != NETREG_REGISTERED) | 
 | 6028 | 		goto out; | 
 | 6029 |  | 
 | 6030 | 	/* Get out if there is nothing todo */ | 
 | 6031 | 	err = 0; | 
| YOSHIFUJI Hideaki | 878628f | 2008-03-26 03:57:35 +0900 | [diff] [blame] | 6032 | 	if (net_eq(dev_net(dev), net)) | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6033 | 		goto out; | 
 | 6034 |  | 
 | 6035 | 	/* Pick the destination device name, and ensure | 
 | 6036 | 	 * we can use it in the destination network namespace. | 
 | 6037 | 	 */ | 
 | 6038 | 	err = -EEXIST; | 
| Octavian Purdila | d903102 | 2009-11-18 02:36:59 +0000 | [diff] [blame] | 6039 | 	if (__dev_get_by_name(net, dev->name)) { | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6040 | 		/* We get here if we can't use the current device name */ | 
 | 6041 | 		if (!pat) | 
 | 6042 | 			goto out; | 
| Daniel Lezcano | 8ce6cebc | 2010-05-19 10:12:19 +0000 | [diff] [blame] | 6043 | 		if (dev_get_valid_name(dev, pat, 1)) | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6044 | 			goto out; | 
 | 6045 | 	} | 
 | 6046 |  | 
 | 6047 | 	/* | 
 | 6048 | 	 * And now a mini version of register_netdevice unregister_netdevice. | 
 | 6049 | 	 */ | 
 | 6050 |  | 
 | 6051 | 	/* If device is running close it first. */ | 
| Pavel Emelyanov | 9b77265 | 2007-10-10 02:49:09 -0700 | [diff] [blame] | 6052 | 	dev_close(dev); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6053 |  | 
 | 6054 | 	/* And unlink it from device chain */ | 
 | 6055 | 	err = -ENODEV; | 
 | 6056 | 	unlist_netdevice(dev); | 
 | 6057 |  | 
 | 6058 | 	synchronize_net(); | 
 | 6059 |  | 
 | 6060 | 	/* Shutdown queueing discipline. */ | 
 | 6061 | 	dev_shutdown(dev); | 
 | 6062 |  | 
 | 6063 | 	/* Notify protocols, that we are about to destroy | 
 | 6064 | 	   this device. They should clean all the things. | 
| David Lamparter | 3b27e10 | 2010-09-17 03:22:19 +0000 | [diff] [blame] | 6065 |  | 
 | 6066 | 	   Note that dev->reg_state stays at NETREG_REGISTERED. | 
 | 6067 | 	   This is wanted because this way 8021q and macvlan know | 
 | 6068 | 	   the device is just moving and can keep their slaves up. | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6069 | 	*/ | 
 | 6070 | 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 
| Eric W. Biederman | a5ee155 | 2009-11-29 15:45:58 +0000 | [diff] [blame] | 6071 | 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6072 |  | 
 | 6073 | 	/* | 
 | 6074 | 	 *	Flush the unicast and multicast chains | 
 | 6075 | 	 */ | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 6076 | 	dev_uc_flush(dev); | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 6077 | 	dev_mc_flush(dev); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6078 |  | 
 | 6079 | 	/* Actually switch the network namespace */ | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 6080 | 	dev_net_set(dev, net); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6081 |  | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6082 | 	/* If there is an ifindex conflict assign a new one */ | 
 | 6083 | 	if (__dev_get_by_index(net, dev->ifindex)) { | 
 | 6084 | 		int iflink = (dev->iflink == dev->ifindex); | 
 | 6085 | 		dev->ifindex = dev_new_index(net); | 
 | 6086 | 		if (iflink) | 
 | 6087 | 			dev->iflink = dev->ifindex; | 
 | 6088 | 	} | 
 | 6089 |  | 
| Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 6090 | 	/* Fixup kobjects */ | 
| Eric W. Biederman | a1b3f59 | 2010-05-04 17:36:49 -0700 | [diff] [blame] | 6091 | 	err = device_rename(&dev->dev, dev->name); | 
| Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 6092 | 	WARN_ON(err); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6093 |  | 
 | 6094 | 	/* Add the device back in the hashes */ | 
 | 6095 | 	list_netdevice(dev); | 
 | 6096 |  | 
 | 6097 | 	/* Notify protocols, that a new device appeared. */ | 
 | 6098 | 	call_netdevice_notifiers(NETDEV_REGISTER, dev); | 
 | 6099 |  | 
| Eric W. Biederman | d90a909 | 2009-12-12 22:11:15 +0000 | [diff] [blame] | 6100 | 	/* | 
 | 6101 | 	 *	Prevent userspace races by waiting until the network | 
 | 6102 | 	 *	device is fully setup before sending notifications. | 
 | 6103 | 	 */ | 
 | 6104 | 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | 
 | 6105 |  | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6106 | 	synchronize_net(); | 
 | 6107 | 	err = 0; | 
 | 6108 | out: | 
 | 6109 | 	return err; | 
 | 6110 | } | 
| Johannes Berg | 463d018 | 2009-07-14 00:33:35 +0200 | [diff] [blame] | 6111 | EXPORT_SYMBOL_GPL(dev_change_net_namespace); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6112 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6113 | static int dev_cpu_callback(struct notifier_block *nfb, | 
 | 6114 | 			    unsigned long action, | 
 | 6115 | 			    void *ocpu) | 
 | 6116 | { | 
 | 6117 | 	struct sk_buff **list_skb; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6118 | 	struct sk_buff *skb; | 
 | 6119 | 	unsigned int cpu, oldcpu = (unsigned long)ocpu; | 
 | 6120 | 	struct softnet_data *sd, *oldsd; | 
 | 6121 |  | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 6122 | 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6123 | 		return NOTIFY_OK; | 
 | 6124 |  | 
 | 6125 | 	local_irq_disable(); | 
 | 6126 | 	cpu = smp_processor_id(); | 
 | 6127 | 	sd = &per_cpu(softnet_data, cpu); | 
 | 6128 | 	oldsd = &per_cpu(softnet_data, oldcpu); | 
 | 6129 |  | 
 | 6130 | 	/* Find end of our completion_queue. */ | 
 | 6131 | 	list_skb = &sd->completion_queue; | 
 | 6132 | 	while (*list_skb) | 
 | 6133 | 		list_skb = &(*list_skb)->next; | 
 | 6134 | 	/* Append completion queue from offline CPU. */ | 
 | 6135 | 	*list_skb = oldsd->completion_queue; | 
 | 6136 | 	oldsd->completion_queue = NULL; | 
 | 6137 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6138 | 	/* Append output queue from offline CPU. */ | 
| Changli Gao | a9cbd58 | 2010-04-26 23:06:24 +0000 | [diff] [blame] | 6139 | 	if (oldsd->output_queue) { | 
 | 6140 | 		*sd->output_queue_tailp = oldsd->output_queue; | 
 | 6141 | 		sd->output_queue_tailp = oldsd->output_queue_tailp; | 
 | 6142 | 		oldsd->output_queue = NULL; | 
 | 6143 | 		oldsd->output_queue_tailp = &oldsd->output_queue; | 
 | 6144 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6145 |  | 
 | 6146 | 	raise_softirq_irqoff(NET_TX_SOFTIRQ); | 
 | 6147 | 	local_irq_enable(); | 
 | 6148 |  | 
 | 6149 | 	/* Process offline CPU's input_pkt_queue */ | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 6150 | 	while ((skb = __skb_dequeue(&oldsd->process_queue))) { | 
 | 6151 | 		netif_rx(skb); | 
 | 6152 | 		input_queue_head_incr(oldsd); | 
 | 6153 | 	} | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 6154 | 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6155 | 		netif_rx(skb); | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 6156 | 		input_queue_head_incr(oldsd); | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 6157 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6158 |  | 
 | 6159 | 	return NOTIFY_OK; | 
 | 6160 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6161 |  | 
 | 6162 |  | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6163 | /** | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6164 |  *	netdev_increment_features - increment feature set by one | 
 | 6165 |  *	@all: current feature set | 
 | 6166 |  *	@one: new feature set | 
 | 6167 |  *	@mask: mask feature set | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6168 |  * | 
 | 6169 |  *	Computes a new feature set after adding a device with feature set | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6170 |  *	@one to the master device with current feature set @all.  Will not | 
 | 6171 |  *	enable anything that is off in @mask. Returns the new feature set. | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6172 |  */ | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 6173 | u32 netdev_increment_features(u32 all, u32 one, u32 mask) | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6174 | { | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6175 | 	/* If device needs checksumming, downgrade to it. */ | 
| Eric Dumazet | d1b19df | 2009-09-03 01:29:39 -0700 | [diff] [blame] | 6176 | 	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6177 | 		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); | 
 | 6178 | 	else if (mask & NETIF_F_ALL_CSUM) { | 
 | 6179 | 		/* If one device supports v4/v6 checksumming, set for all. */ | 
 | 6180 | 		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) && | 
 | 6181 | 		    !(all & NETIF_F_GEN_CSUM)) { | 
 | 6182 | 			all &= ~NETIF_F_ALL_CSUM; | 
 | 6183 | 			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | 
 | 6184 | 		} | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6185 |  | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6186 | 		/* If one device supports hw checksumming, set for all. */ | 
 | 6187 | 		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) { | 
 | 6188 | 			all &= ~NETIF_F_ALL_CSUM; | 
 | 6189 | 			all |= NETIF_F_HW_CSUM; | 
 | 6190 | 		} | 
 | 6191 | 	} | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6192 |  | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6193 | 	one |= NETIF_F_ALL_CSUM; | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6194 |  | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6195 | 	one |= all & NETIF_F_ONE_FOR_ALL; | 
| Sridhar Samudrala | d9f5950 | 2009-10-07 12:24:25 +0000 | [diff] [blame] | 6196 | 	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO; | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6197 | 	all |= one & mask & NETIF_F_ONE_FOR_ALL; | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6198 |  | 
 | 6199 | 	return all; | 
 | 6200 | } | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 6201 | EXPORT_SYMBOL(netdev_increment_features); | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 6202 |  | 
| Pavel Emelyanov | 30d97d3 | 2007-09-16 15:40:33 -0700 | [diff] [blame] | 6203 | static struct hlist_head *netdev_create_hash(void) | 
 | 6204 | { | 
 | 6205 | 	int i; | 
 | 6206 | 	struct hlist_head *hash; | 
 | 6207 |  | 
 | 6208 | 	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); | 
 | 6209 | 	if (hash != NULL) | 
 | 6210 | 		for (i = 0; i < NETDEV_HASHENTRIES; i++) | 
 | 6211 | 			INIT_HLIST_HEAD(&hash[i]); | 
 | 6212 |  | 
 | 6213 | 	return hash; | 
 | 6214 | } | 
 | 6215 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6216 | /* Initialize per network namespace state */ | 
| Pavel Emelyanov | 4665079 | 2007-10-08 20:38:39 -0700 | [diff] [blame] | 6217 | static int __net_init netdev_init(struct net *net) | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6218 | { | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6219 | 	INIT_LIST_HEAD(&net->dev_base_head); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6220 |  | 
| Pavel Emelyanov | 30d97d3 | 2007-09-16 15:40:33 -0700 | [diff] [blame] | 6221 | 	net->dev_name_head = netdev_create_hash(); | 
 | 6222 | 	if (net->dev_name_head == NULL) | 
 | 6223 | 		goto err_name; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6224 |  | 
| Pavel Emelyanov | 30d97d3 | 2007-09-16 15:40:33 -0700 | [diff] [blame] | 6225 | 	net->dev_index_head = netdev_create_hash(); | 
 | 6226 | 	if (net->dev_index_head == NULL) | 
 | 6227 | 		goto err_idx; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6228 |  | 
 | 6229 | 	return 0; | 
| Pavel Emelyanov | 30d97d3 | 2007-09-16 15:40:33 -0700 | [diff] [blame] | 6230 |  | 
 | 6231 | err_idx: | 
 | 6232 | 	kfree(net->dev_name_head); | 
 | 6233 | err_name: | 
 | 6234 | 	return -ENOMEM; | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6235 | } | 
 | 6236 |  | 
| Stephen Hemminger | f0db275 | 2008-09-30 02:23:58 -0700 | [diff] [blame] | 6237 | /** | 
 | 6238 |  *	netdev_drivername - network driver for the device | 
 | 6239 |  *	@dev: network device | 
 | 6240 |  *	@buffer: buffer for resulting name | 
 | 6241 |  *	@len: size of buffer | 
 | 6242 |  * | 
 | 6243 |  *	Determine network driver for device. | 
 | 6244 |  */ | 
| Stephen Hemminger | cf04a4c7 | 2008-09-30 02:22:14 -0700 | [diff] [blame] | 6245 | char *netdev_drivername(const struct net_device *dev, char *buffer, int len) | 
| Arjan van de Ven | 6579e57 | 2008-07-21 13:31:48 -0700 | [diff] [blame] | 6246 | { | 
| Stephen Hemminger | cf04a4c7 | 2008-09-30 02:22:14 -0700 | [diff] [blame] | 6247 | 	const struct device_driver *driver; | 
 | 6248 | 	const struct device *parent; | 
| Arjan van de Ven | 6579e57 | 2008-07-21 13:31:48 -0700 | [diff] [blame] | 6249 |  | 
 | 6250 | 	if (len <= 0 || !buffer) | 
 | 6251 | 		return buffer; | 
 | 6252 | 	buffer[0] = 0; | 
 | 6253 |  | 
 | 6254 | 	parent = dev->dev.parent; | 
 | 6255 |  | 
 | 6256 | 	if (!parent) | 
 | 6257 | 		return buffer; | 
 | 6258 |  | 
 | 6259 | 	driver = parent->driver; | 
 | 6260 | 	if (driver && driver->name) | 
 | 6261 | 		strlcpy(buffer, driver->name, len); | 
 | 6262 | 	return buffer; | 
 | 6263 | } | 
 | 6264 |  | 
| Joe Perches | 256df2f | 2010-06-27 01:02:35 +0000 | [diff] [blame] | 6265 | static int __netdev_printk(const char *level, const struct net_device *dev, | 
 | 6266 | 			   struct va_format *vaf) | 
 | 6267 | { | 
 | 6268 | 	int r; | 
 | 6269 |  | 
 | 6270 | 	if (dev && dev->dev.parent) | 
 | 6271 | 		r = dev_printk(level, dev->dev.parent, "%s: %pV", | 
 | 6272 | 			       netdev_name(dev), vaf); | 
 | 6273 | 	else if (dev) | 
 | 6274 | 		r = printk("%s%s: %pV", level, netdev_name(dev), vaf); | 
 | 6275 | 	else | 
 | 6276 | 		r = printk("%s(NULL net_device): %pV", level, vaf); | 
 | 6277 |  | 
 | 6278 | 	return r; | 
 | 6279 | } | 
 | 6280 |  | 
 | 6281 | int netdev_printk(const char *level, const struct net_device *dev, | 
 | 6282 | 		  const char *format, ...) | 
 | 6283 | { | 
 | 6284 | 	struct va_format vaf; | 
 | 6285 | 	va_list args; | 
 | 6286 | 	int r; | 
 | 6287 |  | 
 | 6288 | 	va_start(args, format); | 
 | 6289 |  | 
 | 6290 | 	vaf.fmt = format; | 
 | 6291 | 	vaf.va = &args; | 
 | 6292 |  | 
 | 6293 | 	r = __netdev_printk(level, dev, &vaf); | 
 | 6294 | 	va_end(args); | 
 | 6295 |  | 
 | 6296 | 	return r; | 
 | 6297 | } | 
 | 6298 | EXPORT_SYMBOL(netdev_printk); | 
 | 6299 |  | 
 | 6300 | #define define_netdev_printk_level(func, level)			\ | 
 | 6301 | int func(const struct net_device *dev, const char *fmt, ...)	\ | 
 | 6302 | {								\ | 
 | 6303 | 	int r;							\ | 
 | 6304 | 	struct va_format vaf;					\ | 
 | 6305 | 	va_list args;						\ | 
 | 6306 | 								\ | 
 | 6307 | 	va_start(args, fmt);					\ | 
 | 6308 | 								\ | 
 | 6309 | 	vaf.fmt = fmt;						\ | 
 | 6310 | 	vaf.va = &args;						\ | 
 | 6311 | 								\ | 
 | 6312 | 	r = __netdev_printk(level, dev, &vaf);			\ | 
 | 6313 | 	va_end(args);						\ | 
 | 6314 | 								\ | 
 | 6315 | 	return r;						\ | 
 | 6316 | }								\ | 
 | 6317 | EXPORT_SYMBOL(func); | 
 | 6318 |  | 
 | 6319 | define_netdev_printk_level(netdev_emerg, KERN_EMERG); | 
 | 6320 | define_netdev_printk_level(netdev_alert, KERN_ALERT); | 
 | 6321 | define_netdev_printk_level(netdev_crit, KERN_CRIT); | 
 | 6322 | define_netdev_printk_level(netdev_err, KERN_ERR); | 
 | 6323 | define_netdev_printk_level(netdev_warn, KERN_WARNING); | 
 | 6324 | define_netdev_printk_level(netdev_notice, KERN_NOTICE); | 
 | 6325 | define_netdev_printk_level(netdev_info, KERN_INFO); | 
 | 6326 |  | 
| Pavel Emelyanov | 4665079 | 2007-10-08 20:38:39 -0700 | [diff] [blame] | 6327 | static void __net_exit netdev_exit(struct net *net) | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6328 | { | 
 | 6329 | 	kfree(net->dev_name_head); | 
 | 6330 | 	kfree(net->dev_index_head); | 
 | 6331 | } | 
 | 6332 |  | 
| Denis V. Lunev | 022cbae | 2007-11-13 03:23:50 -0800 | [diff] [blame] | 6333 | static struct pernet_operations __net_initdata netdev_net_ops = { | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6334 | 	.init = netdev_init, | 
 | 6335 | 	.exit = netdev_exit, | 
 | 6336 | }; | 
 | 6337 |  | 
| Pavel Emelyanov | 4665079 | 2007-10-08 20:38:39 -0700 | [diff] [blame] | 6338 | static void __net_exit default_device_exit(struct net *net) | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6339 | { | 
| Eric W. Biederman | e008b5f | 2009-11-29 22:25:30 +0000 | [diff] [blame] | 6340 | 	struct net_device *dev, *aux; | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6341 | 	/* | 
| Eric W. Biederman | e008b5f | 2009-11-29 22:25:30 +0000 | [diff] [blame] | 6342 | 	 * Push all migratable network devices back to the | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6343 | 	 * initial network namespace | 
 | 6344 | 	 */ | 
 | 6345 | 	rtnl_lock(); | 
| Eric W. Biederman | e008b5f | 2009-11-29 22:25:30 +0000 | [diff] [blame] | 6346 | 	for_each_netdev_safe(net, dev, aux) { | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6347 | 		int err; | 
| Pavel Emelyanov | aca5139 | 2008-05-08 01:24:25 -0700 | [diff] [blame] | 6348 | 		char fb_name[IFNAMSIZ]; | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6349 |  | 
 | 6350 | 		/* Ignore unmoveable devices (i.e. loopback) */ | 
 | 6351 | 		if (dev->features & NETIF_F_NETNS_LOCAL) | 
 | 6352 | 			continue; | 
 | 6353 |  | 
| Eric W. Biederman | e008b5f | 2009-11-29 22:25:30 +0000 | [diff] [blame] | 6354 | 		/* Leave virtual devices for the generic cleanup */ | 
 | 6355 | 		if (dev->rtnl_link_ops) | 
 | 6356 | 			continue; | 
| Eric W. Biederman | d0c082c | 2008-11-05 15:59:38 -0800 | [diff] [blame] | 6357 |  | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6358 | 		/* Push remaing network devices to init_net */ | 
| Pavel Emelyanov | aca5139 | 2008-05-08 01:24:25 -0700 | [diff] [blame] | 6359 | 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); | 
 | 6360 | 		err = dev_change_net_namespace(dev, &init_net, fb_name); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6361 | 		if (err) { | 
| Pavel Emelyanov | aca5139 | 2008-05-08 01:24:25 -0700 | [diff] [blame] | 6362 | 			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n", | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6363 | 				__func__, dev->name, err); | 
| Pavel Emelyanov | aca5139 | 2008-05-08 01:24:25 -0700 | [diff] [blame] | 6364 | 			BUG(); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6365 | 		} | 
 | 6366 | 	} | 
 | 6367 | 	rtnl_unlock(); | 
 | 6368 | } | 
 | 6369 |  | 
| Eric W. Biederman | 04dc7f6b | 2009-12-03 02:29:04 +0000 | [diff] [blame] | 6370 | static void __net_exit default_device_exit_batch(struct list_head *net_list) | 
 | 6371 | { | 
 | 6372 | 	/* At exit all network devices most be removed from a network | 
| Uwe Kleine-König | b595076 | 2010-11-01 15:38:34 -0400 | [diff] [blame] | 6373 | 	 * namespace.  Do this in the reverse order of registration. | 
| Eric W. Biederman | 04dc7f6b | 2009-12-03 02:29:04 +0000 | [diff] [blame] | 6374 | 	 * Do this across as many network namespaces as possible to | 
 | 6375 | 	 * improve batching efficiency. | 
 | 6376 | 	 */ | 
 | 6377 | 	struct net_device *dev; | 
 | 6378 | 	struct net *net; | 
 | 6379 | 	LIST_HEAD(dev_kill_list); | 
 | 6380 |  | 
 | 6381 | 	rtnl_lock(); | 
 | 6382 | 	list_for_each_entry(net, net_list, exit_list) { | 
 | 6383 | 		for_each_netdev_reverse(net, dev) { | 
 | 6384 | 			if (dev->rtnl_link_ops) | 
 | 6385 | 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list); | 
 | 6386 | 			else | 
 | 6387 | 				unregister_netdevice_queue(dev, &dev_kill_list); | 
 | 6388 | 		} | 
 | 6389 | 	} | 
 | 6390 | 	unregister_netdevice_many(&dev_kill_list); | 
 | 6391 | 	rtnl_unlock(); | 
 | 6392 | } | 
 | 6393 |  | 
| Denis V. Lunev | 022cbae | 2007-11-13 03:23:50 -0800 | [diff] [blame] | 6394 | static struct pernet_operations __net_initdata default_device_ops = { | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6395 | 	.exit = default_device_exit, | 
| Eric W. Biederman | 04dc7f6b | 2009-12-03 02:29:04 +0000 | [diff] [blame] | 6396 | 	.exit_batch = default_device_exit_batch, | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 6397 | }; | 
 | 6398 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6399 | /* | 
 | 6400 |  *	Initialize the DEV module. At boot time this walks the device list and | 
 | 6401 |  *	unhooks any devices that fail to initialise (normally hardware not | 
 | 6402 |  *	present) and leaves us with a valid list of present and active devices. | 
 | 6403 |  * | 
 | 6404 |  */ | 
 | 6405 |  | 
 | 6406 | /* | 
 | 6407 |  *       This is called single threaded during boot, so no need | 
 | 6408 |  *       to take the rtnl semaphore. | 
 | 6409 |  */ | 
 | 6410 | static int __init net_dev_init(void) | 
 | 6411 | { | 
 | 6412 | 	int i, rc = -ENOMEM; | 
 | 6413 |  | 
 | 6414 | 	BUG_ON(!dev_boot_phase); | 
 | 6415 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6416 | 	if (dev_proc_init()) | 
 | 6417 | 		goto out; | 
 | 6418 |  | 
| Eric W. Biederman | 8b41d18 | 2007-09-26 22:02:53 -0700 | [diff] [blame] | 6419 | 	if (netdev_kobject_init()) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6420 | 		goto out; | 
 | 6421 |  | 
 | 6422 | 	INIT_LIST_HEAD(&ptype_all); | 
| Pavel Emelyanov | 82d8a867 | 2007-11-26 20:12:58 +0800 | [diff] [blame] | 6423 | 	for (i = 0; i < PTYPE_HASH_SIZE; i++) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6424 | 		INIT_LIST_HEAD(&ptype_base[i]); | 
 | 6425 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 6426 | 	if (register_pernet_subsys(&netdev_net_ops)) | 
 | 6427 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6428 |  | 
 | 6429 | 	/* | 
 | 6430 | 	 *	Initialise the packet receive queues. | 
 | 6431 | 	 */ | 
 | 6432 |  | 
| KAMEZAWA Hiroyuki | 6f91204 | 2006-04-10 22:52:50 -0700 | [diff] [blame] | 6433 | 	for_each_possible_cpu(i) { | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 6434 | 		struct softnet_data *sd = &per_cpu(softnet_data, i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6435 |  | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 6436 | 		memset(sd, 0, sizeof(*sd)); | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 6437 | 		skb_queue_head_init(&sd->input_pkt_queue); | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 6438 | 		skb_queue_head_init(&sd->process_queue); | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 6439 | 		sd->completion_queue = NULL; | 
 | 6440 | 		INIT_LIST_HEAD(&sd->poll_list); | 
| Changli Gao | a9cbd58 | 2010-04-26 23:06:24 +0000 | [diff] [blame] | 6441 | 		sd->output_queue = NULL; | 
 | 6442 | 		sd->output_queue_tailp = &sd->output_queue; | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 6443 | #ifdef CONFIG_RPS | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 6444 | 		sd->csd.func = rps_trigger_softirq; | 
 | 6445 | 		sd->csd.info = sd; | 
 | 6446 | 		sd->csd.flags = 0; | 
 | 6447 | 		sd->cpu = i; | 
| Tom Herbert | 1e94d72 | 2010-03-18 17:45:44 -0700 | [diff] [blame] | 6448 | #endif | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 6449 |  | 
| Eric Dumazet | e36fa2f | 2010-04-19 21:17:14 +0000 | [diff] [blame] | 6450 | 		sd->backlog.poll = process_backlog; | 
 | 6451 | 		sd->backlog.weight = weight_p; | 
 | 6452 | 		sd->backlog.gro_list = NULL; | 
 | 6453 | 		sd->backlog.gro_count = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6454 | 	} | 
 | 6455 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6456 | 	dev_boot_phase = 0; | 
 | 6457 |  | 
| Eric W. Biederman | 505d4f7 | 2008-11-07 22:54:20 -0800 | [diff] [blame] | 6458 | 	/* The loopback device is special if any other network devices | 
 | 6459 | 	 * is present in a network namespace the loopback device must | 
 | 6460 | 	 * be present. Since we now dynamically allocate and free the | 
 | 6461 | 	 * loopback device ensure this invariant is maintained by | 
 | 6462 | 	 * keeping the loopback device as the first device on the | 
 | 6463 | 	 * list of network devices.  Ensuring the loopback devices | 
 | 6464 | 	 * is the first device that appears and the last network device | 
 | 6465 | 	 * that disappears. | 
 | 6466 | 	 */ | 
 | 6467 | 	if (register_pernet_device(&loopback_net_ops)) | 
 | 6468 | 		goto out; | 
 | 6469 |  | 
 | 6470 | 	if (register_pernet_device(&default_device_ops)) | 
 | 6471 | 		goto out; | 
 | 6472 |  | 
| Carlos R. Mafra | 962cf36 | 2008-05-15 11:15:37 -0300 | [diff] [blame] | 6473 | 	open_softirq(NET_TX_SOFTIRQ, net_tx_action); | 
 | 6474 | 	open_softirq(NET_RX_SOFTIRQ, net_rx_action); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6475 |  | 
 | 6476 | 	hotcpu_notifier(dev_cpu_callback, 0); | 
 | 6477 | 	dst_init(); | 
 | 6478 | 	dev_mcast_init(); | 
 | 6479 | 	rc = 0; | 
 | 6480 | out: | 
 | 6481 | 	return rc; | 
 | 6482 | } | 
 | 6483 |  | 
 | 6484 | subsys_initcall(net_dev_init); | 
 | 6485 |  | 
| Krishna Kumar | e88721f | 2009-02-18 17:55:02 -0800 | [diff] [blame] | 6486 | static int __init initialize_hashrnd(void) | 
 | 6487 | { | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 6488 | 	get_random_bytes(&hashrnd, sizeof(hashrnd)); | 
| Krishna Kumar | e88721f | 2009-02-18 17:55:02 -0800 | [diff] [blame] | 6489 | 	return 0; | 
 | 6490 | } | 
 | 6491 |  | 
 | 6492 | late_initcall_sync(initialize_hashrnd); | 
 | 6493 |  |