| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * INET		An implementation of the TCP/IP protocol suite for the LINUX | 
 | 3 |  *		operating system.  INET is implemented using the  BSD Socket | 
 | 4 |  *		interface as the means of communication with the user level. | 
 | 5 |  * | 
 | 6 |  *		Definitions for the Interfaces handler. | 
 | 7 |  * | 
 | 8 |  * Version:	@(#)dev.h	1.0.10	08/12/93 | 
 | 9 |  * | 
| Jesper Juhl | 02c30a8 | 2005-05-05 16:16:16 -0700 | [diff] [blame] | 10 |  * Authors:	Ross Biro | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 |  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 
 | 12 |  *		Corey Minyard <wf-rch!minyard@relay.EU.net> | 
 | 13 |  *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | 
| Alan Cox | 113aa83 | 2008-10-13 19:01:08 -0700 | [diff] [blame] | 14 |  *		Alan Cox, <alan@lxorguk.ukuu.org.uk> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 |  *		Bjorn Ekwall. <bj0rn@blox.se> | 
 | 16 |  *              Pekka Riikonen <priikone@poseidon.pspt.fi> | 
 | 17 |  * | 
 | 18 |  *		This program is free software; you can redistribute it and/or | 
 | 19 |  *		modify it under the terms of the GNU General Public License | 
 | 20 |  *		as published by the Free Software Foundation; either version | 
 | 21 |  *		2 of the License, or (at your option) any later version. | 
 | 22 |  * | 
 | 23 |  *		Moved to /usr/include/linux for NET3 | 
 | 24 |  */ | 
 | 25 | #ifndef _LINUX_NETDEVICE_H | 
 | 26 | #define _LINUX_NETDEVICE_H | 
 | 27 |  | 
 | 28 | #include <linux/if.h> | 
 | 29 | #include <linux/if_ether.h> | 
 | 30 | #include <linux/if_packet.h> | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 31 | #include <linux/if_link.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 |  | 
 | 33 | #ifdef __KERNEL__ | 
| Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 34 | #include <linux/pm_qos_params.h> | 
| Al Viro | d7fe0f2 | 2006-12-03 23:15:30 -0500 | [diff] [blame] | 35 | #include <linux/timer.h> | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 36 | #include <linux/delay.h> | 
| Dmitri Vorobiev | cc0be32 | 2009-03-27 15:55:36 -0700 | [diff] [blame] | 37 | #include <linux/mm.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 38 | #include <asm/atomic.h> | 
 | 39 | #include <asm/cache.h> | 
 | 40 | #include <asm/byteorder.h> | 
 | 41 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 42 | #include <linux/device.h> | 
 | 43 | #include <linux/percpu.h> | 
| David S. Miller | 4d5b78c | 2009-05-06 16:52:51 -0700 | [diff] [blame] | 44 | #include <linux/rculist.h> | 
| Chris Leech | db21733 | 2006-06-17 21:24:58 -0700 | [diff] [blame] | 45 | #include <linux/dmaengine.h> | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 46 | #include <linux/workqueue.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 47 |  | 
| Patrick McHardy | b1b67dd | 2009-04-20 04:49:28 +0000 | [diff] [blame] | 48 | #include <linux/ethtool.h> | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 49 | #include <net/net_namespace.h> | 
| Lennert Buytenhek | cf85d08 | 2008-10-07 13:45:02 +0000 | [diff] [blame] | 50 | #include <net/dsa.h> | 
| Jeff Kirsher | 7a6b6f5 | 2008-11-25 01:02:08 -0800 | [diff] [blame] | 51 | #ifdef CONFIG_DCB | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 52 | #include <net/dcbnl.h> | 
 | 53 | #endif | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 54 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 55 | struct vlan_group; | 
| Jeff Moyer | 115c1d6 | 2005-06-22 22:05:31 -0700 | [diff] [blame] | 56 | struct netpoll_info; | 
| Richard Cochran | c1f19b5 | 2010-07-17 08:49:36 +0000 | [diff] [blame] | 57 | struct phy_device; | 
| Johannes Berg | 704232c | 2007-04-23 12:20:05 -0700 | [diff] [blame] | 58 | /* 802.11 specific */ | 
 | 59 | struct wireless_dev; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 60 | 					/* source back-compat hooks */ | 
 | 61 | #define SET_ETHTOOL_OPS(netdev,ops) \ | 
 | 62 | 	( (netdev)->ethtool_ops = (ops) ) | 
 | 63 |  | 
 | 64 | #define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev | 
 | 65 | 					   functions are available. */ | 
 | 66 | #define HAVE_FREE_NETDEV		/* free_netdev() */ | 
 | 67 | #define HAVE_NETDEV_PRIV		/* netdev_priv() */ | 
 | 68 |  | 
| Stefan Assmann | c1f7942 | 2010-07-22 02:50:21 +0000 | [diff] [blame] | 69 | /* hardware address assignment types */ | 
 | 70 | #define NET_ADDR_PERM		0	/* address is permanent (default) */ | 
 | 71 | #define NET_ADDR_RANDOM		1	/* address is generated randomly */ | 
 | 72 | #define NET_ADDR_STOLEN		2	/* address is stolen from other device */ | 
 | 73 |  | 
| Jarek Poplawski | 9a1654b | 2009-11-15 07:20:12 +0000 | [diff] [blame] | 74 | /* Backlog congestion levels */ | 
 | 75 | #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */ | 
 | 76 | #define NET_RX_DROP		1	/* packet dropped */ | 
 | 77 |  | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 78 | /* Initial net device group. All devices belong to group 0 by default. */ | 
 | 79 | #define INIT_NETDEV_GROUP	0 | 
 | 80 |  | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 81 | /* | 
 | 82 |  * Transmit return codes: transmit return codes originate from three different | 
 | 83 |  * namespaces: | 
 | 84 |  * | 
 | 85 |  * - qdisc return codes | 
 | 86 |  * - driver transmit return codes | 
 | 87 |  * - errno values | 
 | 88 |  * | 
 | 89 |  * Drivers are allowed to return any one of those in their hard_start_xmit() | 
 | 90 |  * function. Real network devices commonly used with qdiscs should only return | 
 | 91 |  * the driver transmit return codes though - when qdiscs are used, the actual | 
 | 92 |  * transmission happens asynchronously, so the value is not propagated to | 
 | 93 |  * higher layers. Virtual network devices transmit synchronously, in this case | 
 | 94 |  * the driver transmit return codes are consumed by dev_queue_xmit(), all | 
 | 95 |  * others are propagated to higher layers. | 
 | 96 |  */ | 
 | 97 |  | 
 | 98 | /* qdisc ->enqueue() return codes. */ | 
 | 99 | #define NET_XMIT_SUCCESS	0x00 | 
| Jarek Poplawski | 9a1654b | 2009-11-15 07:20:12 +0000 | [diff] [blame] | 100 | #define NET_XMIT_DROP		0x01	/* skb dropped			*/ | 
 | 101 | #define NET_XMIT_CN		0x02	/* congestion notification	*/ | 
 | 102 | #define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/ | 
 | 103 | #define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 104 |  | 
| Gerrit Renker | b9df3cb | 2006-11-14 11:21:36 -0200 | [diff] [blame] | 105 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It | 
 | 106 |  * indicates that the device will soon be dropping packets, or already drops | 
 | 107 |  * some packets of the same priority; prompting us to send less aggressively. */ | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 108 | #define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 109 | #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0) | 
 | 110 |  | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 111 | /* Driver transmit return codes */ | 
| Jarek Poplawski | 9a1654b | 2009-11-15 07:20:12 +0000 | [diff] [blame] | 112 | #define NETDEV_TX_MASK		0xf0 | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 113 |  | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 114 | enum netdev_tx { | 
| Patrick McHardy | 572a9d7 | 2009-11-10 06:14:14 +0000 | [diff] [blame] | 115 | 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */ | 
| Jarek Poplawski | 9a1654b | 2009-11-15 07:20:12 +0000 | [diff] [blame] | 116 | 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */ | 
 | 117 | 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/ | 
 | 118 | 	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */ | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 119 | }; | 
 | 120 | typedef enum netdev_tx netdev_tx_t; | 
 | 121 |  | 
| Jarek Poplawski | 9a1654b | 2009-11-15 07:20:12 +0000 | [diff] [blame] | 122 | /* | 
 | 123 |  * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; | 
 | 124 |  * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. | 
 | 125 |  */ | 
 | 126 | static inline bool dev_xmit_complete(int rc) | 
 | 127 | { | 
 | 128 | 	/* | 
 | 129 | 	 * Positive cases with an skb consumed by a driver: | 
 | 130 | 	 * - successful transmission (rc == NETDEV_TX_OK) | 
 | 131 | 	 * - error while transmitting (rc < 0) | 
 | 132 | 	 * - error while queueing to a different device (rc & NET_XMIT_MASK) | 
 | 133 | 	 */ | 
 | 134 | 	if (likely(rc < NET_XMIT_MASK)) | 
 | 135 | 		return true; | 
 | 136 |  | 
 | 137 | 	return false; | 
 | 138 | } | 
 | 139 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 140 | #endif | 
 | 141 |  | 
 | 142 | #define MAX_ADDR_LEN	32		/* Largest hardware address length */ | 
 | 143 |  | 
| Adrian Bunk | c88e6f5 | 2008-06-27 19:54:54 -0700 | [diff] [blame] | 144 | #ifdef  __KERNEL__ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 145 | /* | 
 | 146 |  *	Compute the worst case header length according to the protocols | 
 | 147 |  *	used. | 
 | 148 |  */ | 
| Graf Yang | fe2918b | 2009-02-05 21:26:19 -0800 | [diff] [blame] | 149 |  | 
| John W. Linville | caf66e5 | 2010-02-25 12:02:45 -0500 | [diff] [blame] | 150 | #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) | 
| David S. Miller | 8388e3d | 2008-05-12 20:17:33 -0700 | [diff] [blame] | 151 | # if defined(CONFIG_MAC80211_MESH) | 
 | 152 | #  define LL_MAX_HEADER 128 | 
 | 153 | # else | 
 | 154 | #  define LL_MAX_HEADER 96 | 
 | 155 | # endif | 
| Adrian Bunk | c759a6b | 2009-04-27 02:36:20 -0700 | [diff] [blame] | 156 | #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) | 
| David S. Miller | 8388e3d | 2008-05-12 20:17:33 -0700 | [diff] [blame] | 157 | # define LL_MAX_HEADER 48 | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 158 | #else | 
| David S. Miller | 8388e3d | 2008-05-12 20:17:33 -0700 | [diff] [blame] | 159 | # define LL_MAX_HEADER 32 | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 160 | #endif | 
 | 161 |  | 
| David S. Miller | e81c7359 | 2006-11-28 20:53:39 -0800 | [diff] [blame] | 162 | #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ | 
 | 163 |     !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \ | 
 | 164 |     !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ | 
 | 165 |     !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 166 | #define MAX_HEADER LL_MAX_HEADER | 
 | 167 | #else | 
 | 168 | #define MAX_HEADER (LL_MAX_HEADER + 48) | 
 | 169 | #endif | 
 | 170 |  | 
 | 171 | /* | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 172 |  *	Old network device statistics. Fields are native words | 
 | 173 |  *	(unsigned long) so they can be read and written atomically. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 174 |  */ | 
| Graf Yang | fe2918b | 2009-02-05 21:26:19 -0800 | [diff] [blame] | 175 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 176 | struct net_device_stats { | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 177 | 	unsigned long	rx_packets; | 
 | 178 | 	unsigned long	tx_packets; | 
 | 179 | 	unsigned long	rx_bytes; | 
 | 180 | 	unsigned long	tx_bytes; | 
 | 181 | 	unsigned long	rx_errors; | 
 | 182 | 	unsigned long	tx_errors; | 
 | 183 | 	unsigned long	rx_dropped; | 
 | 184 | 	unsigned long	tx_dropped; | 
 | 185 | 	unsigned long	multicast; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 186 | 	unsigned long	collisions; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 187 | 	unsigned long	rx_length_errors; | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 188 | 	unsigned long	rx_over_errors; | 
 | 189 | 	unsigned long	rx_crc_errors; | 
 | 190 | 	unsigned long	rx_frame_errors; | 
 | 191 | 	unsigned long	rx_fifo_errors; | 
 | 192 | 	unsigned long	rx_missed_errors; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 193 | 	unsigned long	tx_aborted_errors; | 
 | 194 | 	unsigned long	tx_carrier_errors; | 
 | 195 | 	unsigned long	tx_fifo_errors; | 
 | 196 | 	unsigned long	tx_heartbeat_errors; | 
 | 197 | 	unsigned long	tx_window_errors; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 198 | 	unsigned long	rx_compressed; | 
 | 199 | 	unsigned long	tx_compressed; | 
 | 200 | }; | 
 | 201 |  | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 202 | #endif  /*  __KERNEL__  */ | 
 | 203 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 204 |  | 
 | 205 | /* Media selection options. */ | 
 | 206 | enum { | 
 | 207 |         IF_PORT_UNKNOWN = 0, | 
 | 208 |         IF_PORT_10BASE2, | 
 | 209 |         IF_PORT_10BASET, | 
 | 210 |         IF_PORT_AUI, | 
 | 211 |         IF_PORT_100BASET, | 
 | 212 |         IF_PORT_100BASETX, | 
 | 213 |         IF_PORT_100BASEFX | 
 | 214 | }; | 
 | 215 |  | 
 | 216 | #ifdef __KERNEL__ | 
 | 217 |  | 
 | 218 | #include <linux/cache.h> | 
 | 219 | #include <linux/skbuff.h> | 
 | 220 |  | 
 | 221 | struct neighbour; | 
 | 222 | struct neigh_parms; | 
 | 223 | struct sk_buff; | 
 | 224 |  | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 225 | struct netdev_hw_addr { | 
 | 226 | 	struct list_head	list; | 
 | 227 | 	unsigned char		addr[MAX_ADDR_LEN]; | 
 | 228 | 	unsigned char		type; | 
| Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 229 | #define NETDEV_HW_ADDR_T_LAN		1 | 
 | 230 | #define NETDEV_HW_ADDR_T_SAN		2 | 
 | 231 | #define NETDEV_HW_ADDR_T_SLAVE		3 | 
 | 232 | #define NETDEV_HW_ADDR_T_UNICAST	4 | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 233 | #define NETDEV_HW_ADDR_T_MULTICAST	5 | 
| Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 234 | 	bool			synced; | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 235 | 	bool			global_use; | 
| Eric Dumazet | 8f8f103 | 2010-09-19 11:24:02 -0700 | [diff] [blame] | 236 | 	int			refcount; | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 237 | 	struct rcu_head		rcu_head; | 
 | 238 | }; | 
 | 239 |  | 
| Jiri Pirko | 31278e7 | 2009-06-17 01:12:19 +0000 | [diff] [blame] | 240 | struct netdev_hw_addr_list { | 
 | 241 | 	struct list_head	list; | 
 | 242 | 	int			count; | 
 | 243 | }; | 
 | 244 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 245 | #define netdev_hw_addr_list_count(l) ((l)->count) | 
 | 246 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) | 
 | 247 | #define netdev_hw_addr_list_for_each(ha, l) \ | 
 | 248 | 	list_for_each_entry(ha, &(l)->list, list) | 
 | 249 |  | 
 | 250 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) | 
 | 251 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) | 
| Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 252 | #define netdev_for_each_uc_addr(ha, dev) \ | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 253 | 	netdev_hw_addr_list_for_each(ha, &(dev)->uc) | 
| Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 254 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 255 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) | 
 | 256 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) | 
| Pavel Roskin | 18e225f | 2010-04-07 16:40:09 -0700 | [diff] [blame] | 257 | #define netdev_for_each_mc_addr(ha, dev) \ | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 258 | 	netdev_hw_addr_list_for_each(ha, &(dev)->mc) | 
| Jiri Pirko | 6683ece | 2010-02-04 10:22:25 -0800 | [diff] [blame] | 259 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 260 | struct hh_cache { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 261 | 	struct hh_cache *hh_next;	/* Next entry			     */ | 
 | 262 | 	atomic_t	hh_refcnt;	/* number of users                   */ | 
| Eric Dumazet | f049098 | 2006-12-08 00:08:43 -0800 | [diff] [blame] | 263 | /* | 
 | 264 |  * We want hh_output, hh_len, hh_lock and hh_data be a in a separate | 
 | 265 |  * cache line on SMP. | 
 | 266 |  * They are mostly read, but hh_refcnt may be changed quite frequently, | 
 | 267 |  * incurring cache line ping pongs. | 
 | 268 |  */ | 
 | 269 | 	__be16		hh_type ____cacheline_aligned_in_smp; | 
 | 270 | 					/* protocol identifier, f.e ETH_P_IP | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 271 |                                          *  NOTE:  For VLANs, this will be the | 
 | 272 |                                          *  encapuslated type. --BLG | 
 | 273 |                                          */ | 
| Arnaldo Carvalho de Melo | d5c42c0 | 2006-11-27 17:58:02 -0200 | [diff] [blame] | 274 | 	u16		hh_len;		/* length of header */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 275 | 	int		(*hh_output)(struct sk_buff *skb); | 
| Stephen Hemminger | 3644f0c | 2006-12-07 15:08:17 -0800 | [diff] [blame] | 276 | 	seqlock_t	hh_lock; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 277 |  | 
 | 278 | 	/* cached hardware header; allow for machine alignment needs.        */ | 
 | 279 | #define HH_DATA_MOD	16 | 
 | 280 | #define HH_DATA_OFF(__len) \ | 
| Jiri Benc | 5ba0eac | 2005-06-02 16:48:05 -0700 | [diff] [blame] | 281 | 	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 282 | #define HH_DATA_ALIGN(__len) \ | 
 | 283 | 	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | 
 | 284 | 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | 
 | 285 | }; | 
 | 286 |  | 
| Eric Dumazet | 34d101d | 2010-10-11 09:16:57 -0700 | [diff] [blame] | 287 | static inline void hh_cache_put(struct hh_cache *hh) | 
 | 288 | { | 
 | 289 | 	if (atomic_dec_and_test(&hh->hh_refcnt)) | 
 | 290 | 		kfree(hh); | 
 | 291 | } | 
 | 292 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 293 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | 
 | 294 |  * Alternative is: | 
 | 295 |  *   dev->hard_header_len ? (dev->hard_header_len + | 
 | 296 |  *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | 
 | 297 |  * | 
 | 298 |  * We could use other alignment values, but we must maintain the | 
 | 299 |  * relationship HH alignment <= LL alignment. | 
| Johannes Berg | f5184d2 | 2008-05-12 20:48:31 -0700 | [diff] [blame] | 300 |  * | 
 | 301 |  * LL_ALLOCATED_SPACE also takes into account the tailroom the device | 
 | 302 |  * may need. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 303 |  */ | 
 | 304 | #define LL_RESERVED_SPACE(dev) \ | 
| Johannes Berg | f5184d2 | 2008-05-12 20:48:31 -0700 | [diff] [blame] | 305 | 	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 306 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ | 
| Johannes Berg | f5184d2 | 2008-05-12 20:48:31 -0700 | [diff] [blame] | 307 | 	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 
 | 308 | #define LL_ALLOCATED_SPACE(dev) \ | 
 | 309 | 	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 310 |  | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 311 | struct header_ops { | 
 | 312 | 	int	(*create) (struct sk_buff *skb, struct net_device *dev, | 
 | 313 | 			   unsigned short type, const void *daddr, | 
 | 314 | 			   const void *saddr, unsigned len); | 
 | 315 | 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr); | 
 | 316 | 	int	(*rebuild)(struct sk_buff *skb); | 
 | 317 | #define HAVE_HEADER_CACHE | 
 | 318 | 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh); | 
 | 319 | 	void	(*cache_update)(struct hh_cache *hh, | 
 | 320 | 				const struct net_device *dev, | 
 | 321 | 				const unsigned char *haddr); | 
 | 322 | }; | 
 | 323 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 324 | /* These flag bits are private to the generic network queueing | 
 | 325 |  * layer, they may not be explicitly referenced by any other | 
 | 326 |  * code. | 
 | 327 |  */ | 
 | 328 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 329 | enum netdev_state_t { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 330 | 	__LINK_STATE_START, | 
 | 331 | 	__LINK_STATE_PRESENT, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 332 | 	__LINK_STATE_NOCARRIER, | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 333 | 	__LINK_STATE_LINKWATCH_PENDING, | 
 | 334 | 	__LINK_STATE_DORMANT, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 335 | }; | 
 | 336 |  | 
 | 337 |  | 
 | 338 | /* | 
 | 339 |  * This structure holds at boot time configured netdevice settings. They | 
| Graf Yang | fe2918b | 2009-02-05 21:26:19 -0800 | [diff] [blame] | 340 |  * are then used in the device probing. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 341 |  */ | 
 | 342 | struct netdev_boot_setup { | 
 | 343 | 	char name[IFNAMSIZ]; | 
 | 344 | 	struct ifmap map; | 
 | 345 | }; | 
 | 346 | #define NETDEV_BOOT_SETUP_MAX 8 | 
 | 347 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 348 | extern int __init netdev_boot_setup(char *str); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 349 |  | 
 | 350 | /* | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 351 |  * Structure for NAPI scheduling similar to tasklet but with weighting | 
 | 352 |  */ | 
 | 353 | struct napi_struct { | 
 | 354 | 	/* The poll_list must only be managed by the entity which | 
 | 355 | 	 * changes the state of the NAPI_STATE_SCHED bit.  This means | 
 | 356 | 	 * whoever atomically sets that bit can add this napi_struct | 
 | 357 | 	 * to the per-cpu poll_list, and whoever clears that bit | 
 | 358 | 	 * can remove from the list right before clearing the bit. | 
 | 359 | 	 */ | 
 | 360 | 	struct list_head	poll_list; | 
 | 361 |  | 
 | 362 | 	unsigned long		state; | 
 | 363 | 	int			weight; | 
 | 364 | 	int			(*poll)(struct napi_struct *, int); | 
 | 365 | #ifdef CONFIG_NETPOLL | 
 | 366 | 	spinlock_t		poll_lock; | 
 | 367 | 	int			poll_owner; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 368 | #endif | 
| Herbert Xu | 4ae5544 | 2009-02-08 18:00:36 +0000 | [diff] [blame] | 369 |  | 
 | 370 | 	unsigned int		gro_count; | 
 | 371 |  | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 372 | 	struct net_device	*dev; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 373 | 	struct list_head	dev_list; | 
 | 374 | 	struct sk_buff		*gro_list; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 375 | 	struct sk_buff		*skb; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 376 | }; | 
 | 377 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 378 | enum { | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 379 | 	NAPI_STATE_SCHED,	/* Poll is scheduled */ | 
| David S. Miller | a0a4619 | 2008-01-07 20:35:07 -0800 | [diff] [blame] | 380 | 	NAPI_STATE_DISABLE,	/* Disable pending */ | 
| Neil Horman | 7b363e4 | 2008-12-09 23:22:26 -0800 | [diff] [blame] | 381 | 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 382 | }; | 
 | 383 |  | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 384 | enum gro_result { | 
| Herbert Xu | d1c76af | 2009-03-16 10:50:02 -0700 | [diff] [blame] | 385 | 	GRO_MERGED, | 
 | 386 | 	GRO_MERGED_FREE, | 
 | 387 | 	GRO_HELD, | 
 | 388 | 	GRO_NORMAL, | 
 | 389 | 	GRO_DROP, | 
 | 390 | }; | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 391 | typedef enum gro_result gro_result_t; | 
| Herbert Xu | d1c76af | 2009-03-16 10:50:02 -0700 | [diff] [blame] | 392 |  | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 393 | typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb); | 
 | 394 |  | 
| Harvey Harrison | b3c9752 | 2008-02-13 15:03:15 -0800 | [diff] [blame] | 395 | extern void __napi_schedule(struct napi_struct *n); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 396 |  | 
| David S. Miller | a0a4619 | 2008-01-07 20:35:07 -0800 | [diff] [blame] | 397 | static inline int napi_disable_pending(struct napi_struct *n) | 
 | 398 | { | 
 | 399 | 	return test_bit(NAPI_STATE_DISABLE, &n->state); | 
 | 400 | } | 
 | 401 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 402 | /** | 
 | 403 |  *	napi_schedule_prep - check if napi can be scheduled | 
 | 404 |  *	@n: napi context | 
 | 405 |  * | 
 | 406 |  * Test if NAPI routine is already running, and if not mark | 
 | 407 |  * it as running.  This is used as a condition variable | 
| David S. Miller | a0a4619 | 2008-01-07 20:35:07 -0800 | [diff] [blame] | 408 |  * insure only one NAPI poll instance runs.  We also make | 
 | 409 |  * sure there is no pending NAPI disable. | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 410 |  */ | 
 | 411 | static inline int napi_schedule_prep(struct napi_struct *n) | 
 | 412 | { | 
| David S. Miller | a0a4619 | 2008-01-07 20:35:07 -0800 | [diff] [blame] | 413 | 	return !napi_disable_pending(n) && | 
 | 414 | 		!test_and_set_bit(NAPI_STATE_SCHED, &n->state); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 415 | } | 
 | 416 |  | 
 | 417 | /** | 
 | 418 |  *	napi_schedule - schedule NAPI poll | 
 | 419 |  *	@n: napi context | 
 | 420 |  * | 
 | 421 |  * Schedule NAPI poll routine to be called if it is not already | 
 | 422 |  * running. | 
 | 423 |  */ | 
 | 424 | static inline void napi_schedule(struct napi_struct *n) | 
 | 425 | { | 
 | 426 | 	if (napi_schedule_prep(n)) | 
 | 427 | 		__napi_schedule(n); | 
 | 428 | } | 
 | 429 |  | 
| Roland Dreier | bfe13f5 | 2007-10-09 15:47:37 -0700 | [diff] [blame] | 430 | /* Try to reschedule poll. Called by dev->poll() after napi_complete().  */ | 
 | 431 | static inline int napi_reschedule(struct napi_struct *napi) | 
 | 432 | { | 
 | 433 | 	if (napi_schedule_prep(napi)) { | 
 | 434 | 		__napi_schedule(napi); | 
 | 435 | 		return 1; | 
 | 436 | 	} | 
 | 437 | 	return 0; | 
 | 438 | } | 
 | 439 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 440 | /** | 
 | 441 |  *	napi_complete - NAPI processing complete | 
 | 442 |  *	@n: napi context | 
 | 443 |  * | 
 | 444 |  * Mark NAPI processing as complete. | 
 | 445 |  */ | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 446 | extern void __napi_complete(struct napi_struct *n); | 
 | 447 | extern void napi_complete(struct napi_struct *n); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 448 |  | 
 | 449 | /** | 
 | 450 |  *	napi_disable - prevent NAPI from scheduling | 
 | 451 |  *	@n: napi context | 
 | 452 |  * | 
 | 453 |  * Stop NAPI from being scheduled on this context. | 
 | 454 |  * Waits till any outstanding processing completes. | 
 | 455 |  */ | 
 | 456 | static inline void napi_disable(struct napi_struct *n) | 
 | 457 | { | 
| David S. Miller | a0a4619 | 2008-01-07 20:35:07 -0800 | [diff] [blame] | 458 | 	set_bit(NAPI_STATE_DISABLE, &n->state); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 459 | 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | 
| Benjamin Herrenschmidt | 43cc738 | 2007-10-26 04:23:22 -0700 | [diff] [blame] | 460 | 		msleep(1); | 
| David S. Miller | a0a4619 | 2008-01-07 20:35:07 -0800 | [diff] [blame] | 461 | 	clear_bit(NAPI_STATE_DISABLE, &n->state); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 462 | } | 
 | 463 |  | 
 | 464 | /** | 
 | 465 |  *	napi_enable - enable NAPI scheduling | 
 | 466 |  *	@n: napi context | 
 | 467 |  * | 
 | 468 |  * Resume NAPI from being scheduled on this context. | 
 | 469 |  * Must be paired with napi_disable. | 
 | 470 |  */ | 
 | 471 | static inline void napi_enable(struct napi_struct *n) | 
 | 472 | { | 
 | 473 | 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | 
 | 474 | 	smp_mb__before_clear_bit(); | 
 | 475 | 	clear_bit(NAPI_STATE_SCHED, &n->state); | 
 | 476 | } | 
 | 477 |  | 
| Stephen Hemminger | c264c3d | 2007-10-17 13:26:41 -0700 | [diff] [blame] | 478 | #ifdef CONFIG_SMP | 
 | 479 | /** | 
 | 480 |  *	napi_synchronize - wait until NAPI is not running | 
 | 481 |  *	@n: napi context | 
 | 482 |  * | 
 | 483 |  * Wait until NAPI is done being scheduled on this context. | 
 | 484 |  * Waits till any outstanding processing completes but | 
 | 485 |  * does not disable future activations. | 
 | 486 |  */ | 
 | 487 | static inline void napi_synchronize(const struct napi_struct *n) | 
 | 488 | { | 
 | 489 | 	while (test_bit(NAPI_STATE_SCHED, &n->state)) | 
 | 490 | 		msleep(1); | 
 | 491 | } | 
 | 492 | #else | 
 | 493 | # define napi_synchronize(n)	barrier() | 
 | 494 | #endif | 
 | 495 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 496 | enum netdev_queue_state_t { | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 497 | 	__QUEUE_STATE_XOFF, | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 498 | 	__QUEUE_STATE_FROZEN, | 
| Eric Dumazet | 5a0d226 | 2010-11-23 10:42:02 +0000 | [diff] [blame] | 499 | #define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \ | 
 | 500 | 				    (1 << __QUEUE_STATE_FROZEN)) | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 501 | }; | 
 | 502 |  | 
| David S. Miller | bb949fb | 2008-07-08 16:55:56 -0700 | [diff] [blame] | 503 | struct netdev_queue { | 
| Eric Dumazet | 6a321cb | 2009-04-28 04:43:42 -0700 | [diff] [blame] | 504 | /* | 
 | 505 |  * read mostly part | 
 | 506 |  */ | 
| David S. Miller | bb949fb | 2008-07-08 16:55:56 -0700 | [diff] [blame] | 507 | 	struct net_device	*dev; | 
| David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 508 | 	struct Qdisc		*qdisc; | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 509 | 	unsigned long		state; | 
| David S. Miller | b0e1e64 | 2008-07-08 17:42:10 -0700 | [diff] [blame] | 510 | 	struct Qdisc		*qdisc_sleeping; | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 511 | #ifdef CONFIG_RPS | 
 | 512 | 	struct kobject		kobj; | 
 | 513 | #endif | 
| Eric Dumazet | f2cd2d3 | 2010-11-29 08:14:37 +0000 | [diff] [blame] | 514 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | 
 | 515 | 	int			numa_node; | 
 | 516 | #endif | 
| Eric Dumazet | 6a321cb | 2009-04-28 04:43:42 -0700 | [diff] [blame] | 517 | /* | 
 | 518 |  * write mostly part | 
 | 519 |  */ | 
 | 520 | 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp; | 
 | 521 | 	int			xmit_lock_owner; | 
| Eric Dumazet | 9d21493 | 2009-05-17 20:55:16 -0700 | [diff] [blame] | 522 | 	/* | 
 | 523 | 	 * please use this field instead of dev->trans_start | 
 | 524 | 	 */ | 
 | 525 | 	unsigned long		trans_start; | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 526 | } ____cacheline_aligned_in_smp; | 
| David S. Miller | bb949fb | 2008-07-08 16:55:56 -0700 | [diff] [blame] | 527 |  | 
| Eric Dumazet | f2cd2d3 | 2010-11-29 08:14:37 +0000 | [diff] [blame] | 528 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) | 
 | 529 | { | 
 | 530 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | 
 | 531 | 	return q->numa_node; | 
 | 532 | #else | 
| Changli Gao | b236da6 | 2010-12-14 03:09:15 +0000 | [diff] [blame] | 533 | 	return NUMA_NO_NODE; | 
| Eric Dumazet | f2cd2d3 | 2010-11-29 08:14:37 +0000 | [diff] [blame] | 534 | #endif | 
 | 535 | } | 
 | 536 |  | 
 | 537 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) | 
 | 538 | { | 
 | 539 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | 
 | 540 | 	q->numa_node = node; | 
 | 541 | #endif | 
 | 542 | } | 
 | 543 |  | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 544 | #ifdef CONFIG_RPS | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 545 | /* | 
 | 546 |  * This structure holds an RPS map which can be of variable length.  The | 
 | 547 |  * map is an array of CPUs. | 
 | 548 |  */ | 
 | 549 | struct rps_map { | 
 | 550 | 	unsigned int len; | 
 | 551 | 	struct rcu_head rcu; | 
 | 552 | 	u16 cpus[0]; | 
 | 553 | }; | 
 | 554 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) | 
 | 555 |  | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 556 | /* | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 557 |  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the | 
 | 558 |  * tail pointer for that CPU's input queue at the time of last enqueue, and | 
 | 559 |  * a hardware filter index. | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 560 |  */ | 
 | 561 | struct rps_dev_flow { | 
 | 562 | 	u16 cpu; | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 563 | 	u16 filter; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 564 | 	unsigned int last_qtail; | 
 | 565 | }; | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 566 | #define RPS_NO_FILTER 0xffff | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 567 |  | 
 | 568 | /* | 
 | 569 |  * The rps_dev_flow_table structure contains a table of flow mappings. | 
 | 570 |  */ | 
 | 571 | struct rps_dev_flow_table { | 
 | 572 | 	unsigned int mask; | 
 | 573 | 	struct rcu_head rcu; | 
 | 574 | 	struct work_struct free_work; | 
 | 575 | 	struct rps_dev_flow flows[0]; | 
 | 576 | }; | 
 | 577 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | 
 | 578 |     (_num * sizeof(struct rps_dev_flow))) | 
 | 579 |  | 
 | 580 | /* | 
 | 581 |  * The rps_sock_flow_table contains mappings of flows to the last CPU | 
 | 582 |  * on which they were processed by the application (set in recvmsg). | 
 | 583 |  */ | 
 | 584 | struct rps_sock_flow_table { | 
 | 585 | 	unsigned int mask; | 
 | 586 | 	u16 ents[0]; | 
 | 587 | }; | 
 | 588 | #define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ | 
 | 589 |     (_num * sizeof(u16))) | 
 | 590 |  | 
 | 591 | #define RPS_NO_CPU 0xffff | 
 | 592 |  | 
 | 593 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, | 
 | 594 | 					u32 hash) | 
 | 595 | { | 
 | 596 | 	if (table && hash) { | 
 | 597 | 		unsigned int cpu, index = hash & table->mask; | 
 | 598 |  | 
 | 599 | 		/* We only give a hint, preemption can change cpu under us */ | 
 | 600 | 		cpu = raw_smp_processor_id(); | 
 | 601 |  | 
 | 602 | 		if (table->ents[index] != cpu) | 
 | 603 | 			table->ents[index] = cpu; | 
 | 604 | 	} | 
 | 605 | } | 
 | 606 |  | 
 | 607 | static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, | 
 | 608 | 				       u32 hash) | 
 | 609 | { | 
 | 610 | 	if (table && hash) | 
 | 611 | 		table->ents[hash & table->mask] = RPS_NO_CPU; | 
 | 612 | } | 
 | 613 |  | 
| Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 614 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 615 |  | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 616 | #ifdef CONFIG_RFS_ACCEL | 
 | 617 | extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, | 
 | 618 | 				u32 flow_id, u16 filter_id); | 
 | 619 | #endif | 
 | 620 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 621 | /* This structure contains an instance of an RX queue. */ | 
 | 622 | struct netdev_rx_queue { | 
| Eric Dumazet | 6e3f7fa | 2010-10-25 03:02:02 +0000 | [diff] [blame] | 623 | 	struct rps_map __rcu		*rps_map; | 
 | 624 | 	struct rps_dev_flow_table __rcu	*rps_flow_table; | 
 | 625 | 	struct kobject			kobj; | 
| Tom Herbert | fe82224 | 2010-11-09 10:47:38 +0000 | [diff] [blame] | 626 | 	struct net_device		*dev; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 627 | } ____cacheline_aligned_in_smp; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 628 | #endif /* CONFIG_RPS */ | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 629 |  | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 630 | #ifdef CONFIG_XPS | 
 | 631 | /* | 
 | 632 |  * This structure holds an XPS map which can be of variable length.  The | 
 | 633 |  * map is an array of queues. | 
 | 634 |  */ | 
 | 635 | struct xps_map { | 
 | 636 | 	unsigned int len; | 
 | 637 | 	unsigned int alloc_len; | 
 | 638 | 	struct rcu_head rcu; | 
 | 639 | 	u16 queues[0]; | 
 | 640 | }; | 
 | 641 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16))) | 
 | 642 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\ | 
 | 643 |     / sizeof(u16)) | 
 | 644 |  | 
 | 645 | /* | 
 | 646 |  * This structure holds all XPS maps for device.  Maps are indexed by CPU. | 
 | 647 |  */ | 
 | 648 | struct xps_dev_maps { | 
 | 649 | 	struct rcu_head rcu; | 
| Eric Dumazet | a417786 | 2010-11-28 21:43:02 +0000 | [diff] [blame] | 650 | 	struct xps_map __rcu *cpu_map[0]; | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 651 | }; | 
 | 652 | #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\ | 
 | 653 |     (nr_cpu_ids * sizeof(struct xps_map *))) | 
 | 654 | #endif /* CONFIG_XPS */ | 
 | 655 |  | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 656 | #define TC_MAX_QUEUE	16 | 
 | 657 | #define TC_BITMASK	15 | 
 | 658 | /* HW offloaded queuing disciplines txq count and offset maps */ | 
 | 659 | struct netdev_tc_txq { | 
 | 660 | 	u16 count; | 
 | 661 | 	u16 offset; | 
 | 662 | }; | 
 | 663 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 664 | /* | 
 | 665 |  * This structure defines the management hooks for network devices. | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 666 |  * The following hooks can be defined; unless noted otherwise, they are | 
 | 667 |  * optional and can be filled with a null pointer. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 668 |  * | 
 | 669 |  * int (*ndo_init)(struct net_device *dev); | 
 | 670 |  *     This function is called once when network device is registered. | 
 | 671 |  *     The network device can use this to any late stage initializaton | 
 | 672 |  *     or semantic validattion. It can fail with an error code which will | 
 | 673 |  *     be propogated back to register_netdev | 
 | 674 |  * | 
 | 675 |  * void (*ndo_uninit)(struct net_device *dev); | 
 | 676 |  *     This function is called when device is unregistered or when registration | 
 | 677 |  *     fails. It is not called if init fails. | 
 | 678 |  * | 
 | 679 |  * int (*ndo_open)(struct net_device *dev); | 
 | 680 |  *     This function is called when network device transistions to the up | 
 | 681 |  *     state. | 
 | 682 |  * | 
 | 683 |  * int (*ndo_stop)(struct net_device *dev); | 
 | 684 |  *     This function is called when network device transistions to the down | 
 | 685 |  *     state. | 
 | 686 |  * | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 687 |  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, | 
 | 688 |  *                               struct net_device *dev); | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 689 |  *	Called when a packet needs to be transmitted. | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 690 |  *	Must return NETDEV_TX_OK , NETDEV_TX_BUSY. | 
 | 691 |  *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 692 |  *	Required can not be NULL. | 
 | 693 |  * | 
 | 694 |  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); | 
 | 695 |  *	Called to decide which queue to when device supports multiple | 
 | 696 |  *	transmit queues. | 
 | 697 |  * | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 698 |  * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); | 
 | 699 |  *	This function is called to allow device receiver to make | 
 | 700 |  *	changes to configuration when multicast or promiscious is enabled. | 
 | 701 |  * | 
 | 702 |  * void (*ndo_set_rx_mode)(struct net_device *dev); | 
 | 703 |  *	This function is called device changes address list filtering. | 
 | 704 |  * | 
 | 705 |  * void (*ndo_set_multicast_list)(struct net_device *dev); | 
 | 706 |  *	This function is called when the multicast address list changes. | 
 | 707 |  * | 
 | 708 |  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | 
 | 709 |  *	This function  is called when the Media Access Control address | 
| Mike Rapoport | 37b607c | 2009-04-27 05:45:54 -0700 | [diff] [blame] | 710 |  *	needs to be changed. If this interface is not defined, the | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 711 |  *	mac address can not be changed. | 
 | 712 |  * | 
 | 713 |  * int (*ndo_validate_addr)(struct net_device *dev); | 
 | 714 |  *	Test if Media Access Control address is valid for the device. | 
 | 715 |  * | 
 | 716 |  * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | 
 | 717 |  *	Called when a user request an ioctl which can't be handled by | 
 | 718 |  *	the generic interface code. If not defined ioctl's return | 
 | 719 |  *	not supported error code. | 
 | 720 |  * | 
 | 721 |  * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | 
 | 722 |  *	Used to set network devices bus interface parameters. This interface | 
 | 723 |  *	is retained for legacy reason, new devices should use the bus | 
 | 724 |  *	interface (PCI) for low level management. | 
 | 725 |  * | 
 | 726 |  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | 
 | 727 |  *	Called when a user wants to change the Maximum Transfer Unit | 
 | 728 |  *	of a device. If not defined, any request to change MTU will | 
 | 729 |  *	will return an error. | 
 | 730 |  * | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 731 |  * void (*ndo_tx_timeout)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 732 |  *	Callback uses when the transmitter has not made any progress | 
 | 733 |  *	for dev->watchdog ticks. | 
 | 734 |  * | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 735 |  * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 736 |  *                      struct rtnl_link_stats64 *storage); | 
| Wolfram Sang | d308e38 | 2009-10-07 13:53:11 -0700 | [diff] [blame] | 737 |  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 738 |  *	Called when a user wants to get the network device usage | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 739 |  *	statistics. Drivers must do one of the following: | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 740 |  *	1. Define @ndo_get_stats64 to fill in a zero-initialised | 
 | 741 |  *	   rtnl_link_stats64 structure passed by the caller. | 
| Ben Hutchings | 82695d9 | 2010-06-15 15:08:48 -0700 | [diff] [blame] | 742 |  *	2. Define @ndo_get_stats to update a net_device_stats structure | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 743 |  *	   (which should normally be dev->stats) and return a pointer to | 
 | 744 |  *	   it. The structure may be changed asynchronously only if each | 
 | 745 |  *	   field is written atomically. | 
 | 746 |  *	3. Update dev->stats asynchronously and atomically, and define | 
 | 747 |  *	   neither operation. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 748 |  * | 
 | 749 |  * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); | 
| Michal Simek | 68763c8 | 2011-01-02 22:54:09 +0000 | [diff] [blame] | 750 |  *	If device support VLAN receive acceleration | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 751 |  *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called | 
 | 752 |  *	when vlan groups for the device changes.  Note: grp is NULL | 
 | 753 |  *	if no vlan's groups are being used. | 
 | 754 |  * | 
 | 755 |  * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); | 
 | 756 |  *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) | 
 | 757 |  *	this function is called when a VLAN id is registered. | 
 | 758 |  * | 
 | 759 |  * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); | 
 | 760 |  *	If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) | 
 | 761 |  *	this function is called when a VLAN id is unregistered. | 
 | 762 |  * | 
 | 763 |  * void (*ndo_poll_controller)(struct net_device *dev); | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 764 |  * | 
 | 765 |  *	SR-IOV management functions. | 
 | 766 |  * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | 
 | 767 |  * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); | 
 | 768 |  * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); | 
 | 769 |  * int (*ndo_get_vf_config)(struct net_device *dev, | 
 | 770 |  *			    int vf, struct ifla_vf_info *ivf); | 
| Scott Feldman | 57b6108 | 2010-05-17 22:49:55 -0700 | [diff] [blame] | 771 |  * int (*ndo_set_vf_port)(struct net_device *dev, int vf, | 
 | 772 |  *			  struct nlattr *port[]); | 
 | 773 |  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 774 |  * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) | 
 | 775 |  * 	Called to setup 'tc' number of traffic classes in the net device. This | 
 | 776 |  * 	is always called from the stack with the rtnl lock held and netif tx | 
 | 777 |  * 	queues stopped. This allows the netdevice to perform queue management | 
 | 778 |  * 	safely. | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 779 |  * | 
 | 780 |  *	RFS acceleration. | 
 | 781 |  * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | 
 | 782 |  *			    u16 rxq_index, u32 flow_id); | 
 | 783 |  *	Set hardware filter for RFS.  rxq_index is the target queue index; | 
 | 784 |  *	flow_id is a flow ID to be passed to rps_may_expire_flow() later. | 
 | 785 |  *	Return the filter ID on success, or a negative error code. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 786 |  */ | 
| Stephen Hemminger | 47fd5b8 | 2008-11-25 00:20:43 -0800 | [diff] [blame] | 787 | #define HAVE_NET_DEVICE_OPS | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 788 | struct net_device_ops { | 
 | 789 | 	int			(*ndo_init)(struct net_device *dev); | 
 | 790 | 	void			(*ndo_uninit)(struct net_device *dev); | 
 | 791 | 	int			(*ndo_open)(struct net_device *dev); | 
 | 792 | 	int			(*ndo_stop)(struct net_device *dev); | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 793 | 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb, | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 794 | 						   struct net_device *dev); | 
 | 795 | 	u16			(*ndo_select_queue)(struct net_device *dev, | 
 | 796 | 						    struct sk_buff *skb); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 797 | 	void			(*ndo_change_rx_flags)(struct net_device *dev, | 
 | 798 | 						       int flags); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 799 | 	void			(*ndo_set_rx_mode)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 800 | 	void			(*ndo_set_multicast_list)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 801 | 	int			(*ndo_set_mac_address)(struct net_device *dev, | 
 | 802 | 						       void *addr); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 803 | 	int			(*ndo_validate_addr)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 804 | 	int			(*ndo_do_ioctl)(struct net_device *dev, | 
 | 805 | 					        struct ifreq *ifr, int cmd); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 806 | 	int			(*ndo_set_config)(struct net_device *dev, | 
 | 807 | 					          struct ifmap *map); | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 808 | 	int			(*ndo_change_mtu)(struct net_device *dev, | 
 | 809 | 						  int new_mtu); | 
 | 810 | 	int			(*ndo_neigh_setup)(struct net_device *dev, | 
 | 811 | 						   struct neigh_parms *); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 812 | 	void			(*ndo_tx_timeout) (struct net_device *dev); | 
 | 813 |  | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 814 | 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, | 
 | 815 | 						     struct rtnl_link_stats64 *storage); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 816 | 	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); | 
 | 817 |  | 
 | 818 | 	void			(*ndo_vlan_rx_register)(struct net_device *dev, | 
 | 819 | 						        struct vlan_group *grp); | 
 | 820 | 	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev, | 
 | 821 | 						       unsigned short vid); | 
 | 822 | 	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev, | 
 | 823 | 						        unsigned short vid); | 
 | 824 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 825 | 	void                    (*ndo_poll_controller)(struct net_device *dev); | 
| Herbert Xu | 4247e16 | 2010-06-10 16:12:47 +0000 | [diff] [blame] | 826 | 	int			(*ndo_netpoll_setup)(struct net_device *dev, | 
 | 827 | 						     struct netpoll_info *info); | 
| WANG Cong | 0e34e93 | 2010-05-06 00:47:21 -0700 | [diff] [blame] | 828 | 	void			(*ndo_netpoll_cleanup)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 829 | #endif | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 830 | 	int			(*ndo_set_vf_mac)(struct net_device *dev, | 
 | 831 | 						  int queue, u8 *mac); | 
 | 832 | 	int			(*ndo_set_vf_vlan)(struct net_device *dev, | 
 | 833 | 						   int queue, u16 vlan, u8 qos); | 
 | 834 | 	int			(*ndo_set_vf_tx_rate)(struct net_device *dev, | 
 | 835 | 						      int vf, int rate); | 
 | 836 | 	int			(*ndo_get_vf_config)(struct net_device *dev, | 
 | 837 | 						     int vf, | 
 | 838 | 						     struct ifla_vf_info *ivf); | 
| Scott Feldman | 57b6108 | 2010-05-17 22:49:55 -0700 | [diff] [blame] | 839 | 	int			(*ndo_set_vf_port)(struct net_device *dev, | 
 | 840 | 						   int vf, | 
 | 841 | 						   struct nlattr *port[]); | 
 | 842 | 	int			(*ndo_get_vf_port)(struct net_device *dev, | 
 | 843 | 						   int vf, struct sk_buff *skb); | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 844 | 	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc); | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 845 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 
| Yi Zou | cb45439 | 2009-08-31 12:31:36 +0000 | [diff] [blame] | 846 | 	int			(*ndo_fcoe_enable)(struct net_device *dev); | 
 | 847 | 	int			(*ndo_fcoe_disable)(struct net_device *dev); | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 848 | 	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev, | 
 | 849 | 						      u16 xid, | 
 | 850 | 						      struct scatterlist *sgl, | 
 | 851 | 						      unsigned int sgc); | 
 | 852 | 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev, | 
 | 853 | 						     u16 xid); | 
| Yi Zou | df5c794 | 2009-10-28 18:24:35 +0000 | [diff] [blame] | 854 | #define NETDEV_FCOE_WWNN 0 | 
 | 855 | #define NETDEV_FCOE_WWPN 1 | 
 | 856 | 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev, | 
 | 857 | 						    u64 *wwn, int type); | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 858 | #endif | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 859 | #ifdef CONFIG_RFS_ACCEL | 
 | 860 | 	int			(*ndo_rx_flow_steer)(struct net_device *dev, | 
 | 861 | 						     const struct sk_buff *skb, | 
 | 862 | 						     u16 rxq_index, | 
 | 863 | 						     u32 flow_id); | 
 | 864 | #endif | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 865 | }; | 
 | 866 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 867 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 |  *	The DEVICE structure. | 
 | 869 |  *	Actually, this whole structure is a big mistake.  It mixes I/O | 
 | 870 |  *	data with strictly "high-level" data, and it has to know about | 
 | 871 |  *	almost every data structure used in the INET module. | 
 | 872 |  * | 
 | 873 |  *	FIXME: cleanup struct net_device such that network protocol info | 
 | 874 |  *	moves out. | 
 | 875 |  */ | 
 | 876 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 877 | struct net_device { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 |  | 
 | 879 | 	/* | 
 | 880 | 	 * This is the first field of the "visible" part of this structure | 
 | 881 | 	 * (i.e. as seen by users in the "Space.c" file).  It is the name | 
| Justin P. Mattock | 724df61 | 2010-05-26 09:22:40 -0700 | [diff] [blame] | 882 | 	 * of the interface. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 883 | 	 */ | 
 | 884 | 	char			name[IFNAMSIZ]; | 
| Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 885 |  | 
| James Bottomley | 82f6825 | 2010-07-05 22:53:06 +0200 | [diff] [blame] | 886 | 	struct pm_qos_request_list pm_qos_req; | 
| Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 887 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 888 | 	/* device name hash chain */ | 
 | 889 | 	struct hlist_node	name_hlist; | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 890 | 	/* snmp alias */ | 
 | 891 | 	char 			*ifalias; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 892 |  | 
 | 893 | 	/* | 
 | 894 | 	 *	I/O specific fields | 
 | 895 | 	 *	FIXME: Merge these and struct ifmap into one | 
 | 896 | 	 */ | 
 | 897 | 	unsigned long		mem_end;	/* shared mem end	*/ | 
 | 898 | 	unsigned long		mem_start;	/* shared mem start	*/ | 
 | 899 | 	unsigned long		base_addr;	/* device I/O address	*/ | 
 | 900 | 	unsigned int		irq;		/* device IRQ number	*/ | 
 | 901 |  | 
 | 902 | 	/* | 
 | 903 | 	 *	Some hardware also needs these fields, but they are not | 
 | 904 | 	 *	part of the usual set specified in Space.c. | 
 | 905 | 	 */ | 
 | 906 |  | 
 | 907 | 	unsigned char		if_port;	/* Selectable AUI, TP,..*/ | 
 | 908 | 	unsigned char		dma;		/* DMA channel		*/ | 
 | 909 |  | 
 | 910 | 	unsigned long		state; | 
 | 911 |  | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 912 | 	struct list_head	dev_list; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 913 | 	struct list_head	napi_list; | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 914 | 	struct list_head	unreg_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 916 | 	/* Net device features */ | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 917 | 	u32			features; | 
 | 918 |  | 
 | 919 | 	/* VLAN feature mask */ | 
 | 920 | 	u32			vlan_features; | 
 | 921 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 922 | #define NETIF_F_SG		1	/* Scatter/gather IO. */ | 
| Stephen Hemminger | d212f87 | 2007-06-27 00:47:37 -0700 | [diff] [blame] | 923 | #define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */ | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 924 | #define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */ | 
 | 925 | #define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */ | 
| Stephen Hemminger | d212f87 | 2007-06-27 00:47:37 -0700 | [diff] [blame] | 926 | #define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */ | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 927 | #define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */ | 
 | 928 | #define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */ | 
 | 929 | #define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */ | 
 | 930 | #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */ | 
 | 931 | #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */ | 
 | 932 | #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */ | 
| Herbert Xu | 37c3185 | 2006-06-22 03:07:29 -0700 | [diff] [blame] | 933 | #define NETIF_F_GSO		2048	/* Enable software GSO. */ | 
| Christian Borntraeger | e24eb52 | 2007-09-25 19:42:02 -0700 | [diff] [blame] | 934 | #define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */ | 
 | 935 | 					/* do not use LLTX in new drivers */ | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 936 | #define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */ | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 937 | #define NETIF_F_GRO		16384	/* Generic receive offload */ | 
| Jeff Garzik | 3ae7c0b | 2007-08-15 16:00:51 -0700 | [diff] [blame] | 938 | #define NETIF_F_LRO		32768	/* large receive offload */ | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 939 |  | 
| Jesse Brandeburg | 8dc92f7 | 2009-04-27 22:35:52 +0000 | [diff] [blame] | 940 | /* the GSO_MASK reserves bits 16 through 23 */ | 
| Chris Leech | 01d5b2f | 2009-02-27 14:06:49 -0800 | [diff] [blame] | 941 | #define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */ | 
| Jesse Brandeburg | 8dc92f7 | 2009-04-27 22:35:52 +0000 | [diff] [blame] | 942 | #define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */ | 
| Yi Zou | bb2af4f | 2009-08-14 12:41:57 +0000 | [diff] [blame] | 943 | #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ | 
| Peter P Waskiewicz Jr | 15682bc | 2010-02-10 20:03:05 -0800 | [diff] [blame] | 944 | #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */ | 
| stephen hemminger | b00fabb | 2010-03-29 14:47:27 +0000 | [diff] [blame] | 945 | #define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */ | 
| Chris Leech | 01d5b2f | 2009-02-27 14:06:49 -0800 | [diff] [blame] | 946 |  | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 947 | 	/* Segmentation offload features */ | 
| Patrick McHardy | 289c79a | 2008-05-23 00:22:04 -0700 | [diff] [blame] | 948 | #define NETIF_F_GSO_SHIFT	16 | 
| Chris Leech | 43eb99c | 2009-02-27 14:06:43 -0800 | [diff] [blame] | 949 | #define NETIF_F_GSO_MASK	0x00ff0000 | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 950 | #define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) | 
| Herbert Xu | f83ef8c | 2006-06-30 13:37:03 -0700 | [diff] [blame] | 951 | #define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT) | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 952 | #define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) | 
| Herbert Xu | f83ef8c | 2006-06-30 13:37:03 -0700 | [diff] [blame] | 953 | #define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) | 
 | 954 | #define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) | 
| Chris Leech | 01d5b2f | 2009-02-27 14:06:49 -0800 | [diff] [blame] | 955 | #define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 956 |  | 
| Herbert Xu | 78eb887 | 2006-08-17 18:22:32 -0700 | [diff] [blame] | 957 | 	/* List of features with software fallbacks. */ | 
| Herbert Xu | d29c0c5 | 2010-06-14 20:21:04 +0000 | [diff] [blame] | 958 | #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \ | 
 | 959 | 				 NETIF_F_TSO6 | NETIF_F_UFO) | 
| Herbert Xu | 78eb887 | 2006-08-17 18:22:32 -0700 | [diff] [blame] | 960 |  | 
| Stephen Hemminger | d212f87 | 2007-06-27 00:47:37 -0700 | [diff] [blame] | 961 |  | 
| Herbert Xu | 8648b30 | 2006-06-17 22:06:05 -0700 | [diff] [blame] | 962 | #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) | 
| Stephen Hemminger | d212f87 | 2007-06-27 00:47:37 -0700 | [diff] [blame] | 963 | #define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) | 
 | 964 | #define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) | 
 | 965 | #define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) | 
| Herbert Xu | 8648b30 | 2006-06-17 22:06:05 -0700 | [diff] [blame] | 966 |  | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 967 | 	/* | 
 | 968 | 	 * If one device supports one of these features, then enable them | 
 | 969 | 	 * for all in netdev_increment_features. | 
 | 970 | 	 */ | 
 | 971 | #define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 972 | 				 NETIF_F_SG | NETIF_F_HIGHDMA |		\ | 
| Herbert Xu | b63365a | 2008-10-23 01:11:29 -0700 | [diff] [blame] | 973 | 				 NETIF_F_FRAGLIST) | 
 | 974 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | 	/* Interface index. Unique device identifier	*/ | 
 | 976 | 	int			ifindex; | 
 | 977 | 	int			iflink; | 
 | 978 |  | 
| Rusty Russell | c45d286 | 2007-03-28 14:29:08 -0700 | [diff] [blame] | 979 | 	struct net_device_stats	stats; | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 980 | 	atomic_long_t		rx_dropped; /* dropped packets by core network | 
 | 981 | 					     * Do not use this in drivers. | 
 | 982 | 					     */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 |  | 
| Johannes Berg | b86e028 | 2007-04-26 20:48:23 -0700 | [diff] [blame] | 984 | #ifdef CONFIG_WIRELESS_EXT | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | 	/* List of functions to handle Wireless Extensions (instead of ioctl). | 
 | 986 | 	 * See <net/iw_handler.h> for details. Jean II */ | 
 | 987 | 	const struct iw_handler_def *	wireless_handlers; | 
 | 988 | 	/* Instance data managed by the core of Wireless Extensions. */ | 
 | 989 | 	struct iw_public_data *	wireless_data; | 
| Johannes Berg | b86e028 | 2007-04-26 20:48:23 -0700 | [diff] [blame] | 990 | #endif | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 991 | 	/* Management operations */ | 
 | 992 | 	const struct net_device_ops *netdev_ops; | 
| Stephen Hemminger | 76fd859 | 2006-09-08 11:16:13 -0700 | [diff] [blame] | 993 | 	const struct ethtool_ops *ethtool_ops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 994 |  | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 995 | 	/* Hardware header description */ | 
 | 996 | 	const struct header_ops *header_ops; | 
 | 997 |  | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 998 | 	unsigned int		flags;	/* interface flags (a la BSD)	*/ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | 	unsigned short		gflags; | 
| Simon Horman | 1726442 | 2010-08-23 16:26:41 +0000 | [diff] [blame] | 1000 |         unsigned int            priv_flags; /* Like 'flags' but invisible to userspace. */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 | 	unsigned short		padded;	/* How much padding added by alloc_netdev() */ | 
 | 1002 |  | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 1003 | 	unsigned char		operstate; /* RFC2863 operstate */ | 
 | 1004 | 	unsigned char		link_mode; /* mapping policy to operstate */ | 
 | 1005 |  | 
| David S. Miller | cd7b539 | 2010-05-02 22:27:59 -0700 | [diff] [blame] | 1006 | 	unsigned int		mtu;	/* interface MTU value		*/ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | 	unsigned short		type;	/* interface hardware type	*/ | 
 | 1008 | 	unsigned short		hard_header_len;	/* hardware hdr length	*/ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 |  | 
| Johannes Berg | f5184d2 | 2008-05-12 20:48:31 -0700 | [diff] [blame] | 1010 | 	/* extra head- and tailroom the hardware may need, but not in all cases | 
 | 1011 | 	 * can this be guaranteed, especially tailroom. Some cases also use | 
 | 1012 | 	 * LL_MAX_HEADER instead to allocate the skb. | 
 | 1013 | 	 */ | 
 | 1014 | 	unsigned short		needed_headroom; | 
 | 1015 | 	unsigned short		needed_tailroom; | 
 | 1016 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | 	/* Interface address info. */ | 
| Jon Wetzel | a6f9a70 | 2005-08-20 17:15:54 -0700 | [diff] [blame] | 1018 | 	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 
| Stefan Assmann | c1f7942 | 2010-07-22 02:50:21 +0000 | [diff] [blame] | 1019 | 	unsigned char		addr_assign_type; /* hw address assignment type */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | 	unsigned char		addr_len;	/* hardware address length	*/ | 
 | 1021 | 	unsigned short          dev_id;		/* for shared network cards */ | 
 | 1022 |  | 
| Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1023 | 	spinlock_t		addr_list_lock; | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1024 | 	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */ | 
 | 1025 | 	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */ | 
 | 1026 | 	int			uc_promisc; | 
| Wang Chen | 9d45abe | 2008-06-17 21:12:48 -0700 | [diff] [blame] | 1027 | 	unsigned int		promiscuity; | 
 | 1028 | 	unsigned int		allmulti; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1029 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 |  | 
 | 1031 | 	/* Protocol specific pointers */ | 
| Jesse Gross | 65ac6a5 | 2010-10-20 13:56:05 +0000 | [diff] [blame] | 1032 |  | 
 | 1033 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 
| Eric Dumazet | b616b09 | 2010-10-24 21:31:58 +0000 | [diff] [blame] | 1034 | 	struct vlan_group __rcu	*vlgrp;		/* VLAN group */ | 
| Jesse Gross | 65ac6a5 | 2010-10-20 13:56:05 +0000 | [diff] [blame] | 1035 | #endif | 
| Lennert Buytenhek | 91da11f | 2008-10-07 13:44:02 +0000 | [diff] [blame] | 1036 | #ifdef CONFIG_NET_DSA | 
 | 1037 | 	void			*dsa_ptr;	/* dsa specific data */ | 
 | 1038 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | 	void 			*atalk_ptr;	/* AppleTalk link 	*/ | 
| Eric Dumazet | 95ae6b2 | 2010-09-15 04:04:31 +0000 | [diff] [blame] | 1040 | 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/ | 
| Eric Dumazet | fc766e4c | 2010-10-29 03:09:24 +0000 | [diff] [blame] | 1041 | 	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */ | 
| Eric Dumazet | 198caec | 2010-10-24 21:32:05 +0000 | [diff] [blame] | 1042 | 	struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | 	void			*ec_ptr;	/* Econet specific data	*/ | 
 | 1044 | 	void			*ax25_ptr;	/* AX.25 specific data */ | 
| Johannes Berg | 704232c | 2007-04-23 12:20:05 -0700 | [diff] [blame] | 1045 | 	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data, | 
 | 1046 | 						   assign before registering */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1048 | /* | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1049 |  * Cache lines mostly used on receive path (including eth_type_trans()) | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1050 |  */ | 
| Eric Dumazet | 4dc8913 | 2010-08-31 07:40:16 +0000 | [diff] [blame] | 1051 | 	unsigned long		last_rx;	/* Time of last Rx | 
 | 1052 | 						 * This should not be set in | 
 | 1053 | 						 * drivers, unless really needed, | 
 | 1054 | 						 * because network stack (bonding) | 
 | 1055 | 						 * use it if/when necessary, to | 
 | 1056 | 						 * avoid dirtying this cache line. | 
 | 1057 | 						 */ | 
 | 1058 |  | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1059 | 	struct net_device	*master; /* Pointer to master device of a group, | 
 | 1060 | 					  * which this device is member of. | 
 | 1061 | 					  */ | 
 | 1062 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1063 | 	/* Interface address info used in eth_type_trans() */ | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 1064 | 	unsigned char		*dev_addr;	/* hw address, (before bcast | 
 | 1065 | 						   because most packets are | 
 | 1066 | 						   unicast) */ | 
 | 1067 |  | 
| Jiri Pirko | 31278e7 | 2009-06-17 01:12:19 +0000 | [diff] [blame] | 1068 | 	struct netdev_hw_addr_list	dev_addrs; /* list of device | 
 | 1069 | 						      hw addresses */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1071 | 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/ | 
 | 1072 |  | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 1073 | #ifdef CONFIG_RPS | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1074 | 	struct kset		*queues_kset; | 
 | 1075 |  | 
 | 1076 | 	struct netdev_rx_queue	*_rx; | 
 | 1077 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1078 | 	/* Number of RX queues allocated at register_netdev() time */ | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1079 | 	unsigned int		num_rx_queues; | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1080 |  | 
 | 1081 | 	/* Number of RX queues currently active in device */ | 
 | 1082 | 	unsigned int		real_num_rx_queues; | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 1083 |  | 
 | 1084 | #ifdef CONFIG_RFS_ACCEL | 
 | 1085 | 	/* CPU reverse-mapping for RX completion interrupts, indexed | 
 | 1086 | 	 * by RX queue number.  Assigned by driver.  This must only be | 
 | 1087 | 	 * set if the ndo_rx_flow_steer operation is defined. */ | 
 | 1088 | 	struct cpu_rmap		*rx_cpu_rmap; | 
 | 1089 | #endif | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 1090 | #endif | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1091 |  | 
| stephen hemminger | 61391cd | 2010-11-15 06:38:12 +0000 | [diff] [blame] | 1092 | 	rx_handler_func_t __rcu	*rx_handler; | 
 | 1093 | 	void __rcu		*rx_handler_data; | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1094 |  | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 1095 | 	struct netdev_queue __rcu *ingress_queue; | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1096 |  | 
 | 1097 | /* | 
 | 1098 |  * Cache lines mostly used on transmit path | 
 | 1099 |  */ | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1100 | 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1101 |  | 
 | 1102 | 	/* Number of TX queues allocated at alloc_netdev_mq() time  */ | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1103 | 	unsigned int		num_tx_queues; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1104 |  | 
 | 1105 | 	/* Number of TX queues currently active in device  */ | 
 | 1106 | 	unsigned int		real_num_tx_queues; | 
 | 1107 |  | 
| Patrick McHardy | af356af | 2009-09-04 06:41:18 +0000 | [diff] [blame] | 1108 | 	/* root qdisc from userspace point of view */ | 
 | 1109 | 	struct Qdisc		*qdisc; | 
 | 1110 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */ | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 1112 | 	spinlock_t		tx_global_lock; | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1113 |  | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1114 | #ifdef CONFIG_XPS | 
| Eric Dumazet | a417786 | 2010-11-28 21:43:02 +0000 | [diff] [blame] | 1115 | 	struct xps_dev_maps __rcu *xps_maps; | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1116 | #endif | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1117 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1118 | 	/* These may be needed for future network-power-down code. */ | 
| Eric Dumazet | 9d21493 | 2009-05-17 20:55:16 -0700 | [diff] [blame] | 1119 |  | 
 | 1120 | 	/* | 
 | 1121 | 	 * trans_start here is expensive for high speed devices on SMP, | 
 | 1122 | 	 * please use netdev_queue->trans_start instead. | 
 | 1123 | 	 */ | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1124 | 	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/ | 
 | 1125 |  | 
 | 1126 | 	int			watchdog_timeo; /* used by dev_watchdog() */ | 
 | 1127 | 	struct timer_list	watchdog_timer; | 
 | 1128 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | 	/* Number of references to this device */ | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 1130 | 	int __percpu		*pcpu_refcnt; | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1131 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | 	/* delayed register/unregister */ | 
 | 1133 | 	struct list_head	todo_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | 	/* device index hash chain */ | 
 | 1135 | 	struct hlist_node	index_hlist; | 
 | 1136 |  | 
| Eric Dumazet | e014deb | 2009-11-17 05:59:21 +0000 | [diff] [blame] | 1137 | 	struct list_head	link_watch_list; | 
| Herbert Xu | 572a103 | 2007-05-08 18:34:17 -0700 | [diff] [blame] | 1138 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | 	/* register/unregister state machine */ | 
 | 1140 | 	enum { NETREG_UNINITIALIZED=0, | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 1141 | 	       NETREG_REGISTERED,	/* completed register_netdevice */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */ | 
 | 1143 | 	       NETREG_UNREGISTERED,	/* completed unregister todo */ | 
 | 1144 | 	       NETREG_RELEASED,		/* called free_netdev */ | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 1145 | 	       NETREG_DUMMY,		/* dummy device for NAPI poll */ | 
| Patrick McHardy | a283576 | 2010-02-26 06:34:51 +0000 | [diff] [blame] | 1146 | 	} reg_state:16; | 
 | 1147 |  | 
 | 1148 | 	enum { | 
 | 1149 | 		RTNL_LINK_INITIALIZED, | 
 | 1150 | 		RTNL_LINK_INITIALIZING, | 
 | 1151 | 	} rtnl_link_state:16; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1153 | 	/* Called from unregister, can be used to call free_netdev */ | 
 | 1154 | 	void (*destructor)(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 | #ifdef CONFIG_NETPOLL | 
| Jeff Moyer | 115c1d6 | 2005-06-22 22:05:31 -0700 | [diff] [blame] | 1157 | 	struct netpoll_info	*npinfo; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | #endif | 
| David S. Miller | eae792b | 2008-07-15 03:03:33 -0700 | [diff] [blame] | 1159 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1160 | #ifdef CONFIG_NET_NS | 
| Eric W. Biederman | 4a1c537 | 2007-09-12 11:56:32 +0200 | [diff] [blame] | 1161 | 	/* Network namespace this network device is inside */ | 
 | 1162 | 	struct net		*nd_net; | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1163 | #endif | 
| Eric W. Biederman | 4a1c537 | 2007-09-12 11:56:32 +0200 | [diff] [blame] | 1164 |  | 
| David S. Miller | 4951704 | 2008-05-12 03:29:11 -0700 | [diff] [blame] | 1165 | 	/* mid-layer private */ | 
| Eric Dumazet | a7855c7 | 2010-09-23 23:51:51 +0000 | [diff] [blame] | 1166 | 	union { | 
 | 1167 | 		void				*ml_priv; | 
 | 1168 | 		struct pcpu_lstats __percpu	*lstats; /* loopback stats */ | 
| Eric Dumazet | 290b895 | 2010-09-27 00:33:35 +0000 | [diff] [blame] | 1169 | 		struct pcpu_tstats __percpu	*tstats; /* tunnel stats */ | 
| Eric Dumazet | 6d81f41 | 2010-09-27 20:50:33 +0000 | [diff] [blame] | 1170 | 		struct pcpu_dstats __percpu	*dstats; /* dummy stats */ | 
| Eric Dumazet | a7855c7 | 2010-09-23 23:51:51 +0000 | [diff] [blame] | 1171 | 	}; | 
| Patrick McHardy | eca9eba | 2008-07-05 21:26:13 -0700 | [diff] [blame] | 1172 | 	/* GARP */ | 
| Eric Dumazet | 3cc77ec | 2010-10-24 21:32:36 +0000 | [diff] [blame] | 1173 | 	struct garp_port __rcu	*garp_port; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | 	/* class/net/name entry */ | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1176 | 	struct device		dev; | 
| Eric W. Biederman | 0c509a6 | 2009-10-29 14:18:21 +0000 | [diff] [blame] | 1177 | 	/* space for optional device, statistics, and wireless sysfs groups */ | 
 | 1178 | 	const struct attribute_group *sysfs_groups[4]; | 
| Patrick McHardy | 38f7b87 | 2007-06-13 12:03:51 -0700 | [diff] [blame] | 1179 |  | 
 | 1180 | 	/* rtnetlink link ops */ | 
 | 1181 | 	const struct rtnl_link_ops *rtnl_link_ops; | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1182 |  | 
| Peter P Waskiewicz Jr | 82cc1a7 | 2008-03-21 03:43:19 -0700 | [diff] [blame] | 1183 | 	/* for setting kernel sock attribute on TCP connection setup */ | 
 | 1184 | #define GSO_MAX_SIZE		65536 | 
 | 1185 | 	unsigned int		gso_max_size; | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1186 |  | 
| Jeff Kirsher | 7a6b6f5 | 2008-11-25 01:02:08 -0800 | [diff] [blame] | 1187 | #ifdef CONFIG_DCB | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 1188 | 	/* Data Center Bridging netlink ops */ | 
| Stephen Hemminger | 3295354 | 2009-10-05 06:01:03 +0000 | [diff] [blame] | 1189 | 	const struct dcbnl_rtnl_ops *dcbnl_ops; | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 1190 | #endif | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1191 | 	u8 num_tc; | 
 | 1192 | 	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | 
 | 1193 | 	u8 prio_tc_map[TC_BITMASK + 1]; | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 1194 |  | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 1195 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 
 | 1196 | 	/* max exchange id for FCoE LRO by ddp */ | 
 | 1197 | 	unsigned int		fcoe_ddp_xid; | 
 | 1198 | #endif | 
| Peter P Waskiewicz Jr | 15682bc | 2010-02-10 20:03:05 -0800 | [diff] [blame] | 1199 | 	/* n-tuple filter list attached to this device */ | 
 | 1200 | 	struct ethtool_rx_ntuple_list ethtool_ntuple_list; | 
| Richard Cochran | c1f19b5 | 2010-07-17 08:49:36 +0000 | [diff] [blame] | 1201 |  | 
 | 1202 | 	/* phy device may attach itself for hardware timestamping */ | 
 | 1203 | 	struct phy_device *phydev; | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 1204 |  | 
 | 1205 | 	/* group the device belongs to */ | 
 | 1206 | 	int group; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | }; | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1208 | #define to_net_dev(d) container_of(d, struct net_device, dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 |  | 
 | 1210 | #define	NETDEV_ALIGN		32 | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 |  | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1212 | static inline | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1213 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | 
 | 1214 | { | 
 | 1215 | 	return dev->prio_tc_map[prio & TC_BITMASK]; | 
 | 1216 | } | 
 | 1217 |  | 
 | 1218 | static inline | 
 | 1219 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | 
 | 1220 | { | 
 | 1221 | 	if (tc >= dev->num_tc) | 
 | 1222 | 		return -EINVAL; | 
 | 1223 |  | 
 | 1224 | 	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | 
 | 1225 | 	return 0; | 
 | 1226 | } | 
 | 1227 |  | 
 | 1228 | static inline | 
 | 1229 | void netdev_reset_tc(struct net_device *dev) | 
 | 1230 | { | 
 | 1231 | 	dev->num_tc = 0; | 
 | 1232 | 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); | 
 | 1233 | 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); | 
 | 1234 | } | 
 | 1235 |  | 
 | 1236 | static inline | 
 | 1237 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) | 
 | 1238 | { | 
 | 1239 | 	if (tc >= dev->num_tc) | 
 | 1240 | 		return -EINVAL; | 
 | 1241 |  | 
 | 1242 | 	dev->tc_to_txq[tc].count = count; | 
 | 1243 | 	dev->tc_to_txq[tc].offset = offset; | 
 | 1244 | 	return 0; | 
 | 1245 | } | 
 | 1246 |  | 
 | 1247 | static inline | 
 | 1248 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc) | 
 | 1249 | { | 
 | 1250 | 	if (num_tc > TC_MAX_QUEUE) | 
 | 1251 | 		return -EINVAL; | 
 | 1252 |  | 
 | 1253 | 	dev->num_tc = num_tc; | 
 | 1254 | 	return 0; | 
 | 1255 | } | 
 | 1256 |  | 
 | 1257 | static inline | 
 | 1258 | int netdev_get_num_tc(struct net_device *dev) | 
 | 1259 | { | 
 | 1260 | 	return dev->num_tc; | 
 | 1261 | } | 
 | 1262 |  | 
 | 1263 | static inline | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1264 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | 
 | 1265 | 					 unsigned int index) | 
 | 1266 | { | 
 | 1267 | 	return &dev->_tx[index]; | 
 | 1268 | } | 
 | 1269 |  | 
 | 1270 | static inline void netdev_for_each_tx_queue(struct net_device *dev, | 
 | 1271 | 					    void (*f)(struct net_device *, | 
 | 1272 | 						      struct netdev_queue *, | 
 | 1273 | 						      void *), | 
 | 1274 | 					    void *arg) | 
 | 1275 | { | 
 | 1276 | 	unsigned int i; | 
 | 1277 |  | 
 | 1278 | 	for (i = 0; i < dev->num_tx_queues; i++) | 
 | 1279 | 		f(dev, &dev->_tx[i], arg); | 
 | 1280 | } | 
 | 1281 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1282 | /* | 
 | 1283 |  * Net namespace inlines | 
 | 1284 |  */ | 
 | 1285 | static inline | 
 | 1286 | struct net *dev_net(const struct net_device *dev) | 
 | 1287 | { | 
| Eric Dumazet | c2d9ba9 | 2010-06-01 06:51:19 +0000 | [diff] [blame] | 1288 | 	return read_pnet(&dev->nd_net); | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1289 | } | 
 | 1290 |  | 
 | 1291 | static inline | 
| Denis V. Lunev | f5aa23f | 2008-03-26 00:48:17 -0700 | [diff] [blame] | 1292 | void dev_net_set(struct net_device *dev, struct net *net) | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1293 | { | 
 | 1294 | #ifdef CONFIG_NET_NS | 
| Denis V. Lunev | f3005d7 | 2008-04-16 02:02:18 -0700 | [diff] [blame] | 1295 | 	release_net(dev->nd_net); | 
 | 1296 | 	dev->nd_net = hold_net(net); | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1297 | #endif | 
 | 1298 | } | 
 | 1299 |  | 
| Lennert Buytenhek | cf85d08 | 2008-10-07 13:45:02 +0000 | [diff] [blame] | 1300 | static inline bool netdev_uses_dsa_tags(struct net_device *dev) | 
 | 1301 | { | 
 | 1302 | #ifdef CONFIG_NET_DSA_TAG_DSA | 
 | 1303 | 	if (dev->dsa_ptr != NULL) | 
 | 1304 | 		return dsa_uses_dsa_tags(dev->dsa_ptr); | 
 | 1305 | #endif | 
 | 1306 |  | 
 | 1307 | 	return 0; | 
 | 1308 | } | 
 | 1309 |  | 
| Arnd Bergmann | 8a83a00 | 2010-01-30 12:23:03 +0000 | [diff] [blame] | 1310 | #ifndef CONFIG_NET_NS | 
 | 1311 | static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev) | 
 | 1312 | { | 
 | 1313 | 	skb->dev = dev; | 
 | 1314 | } | 
 | 1315 | #else /* CONFIG_NET_NS */ | 
 | 1316 | void skb_set_dev(struct sk_buff *skb, struct net_device *dev); | 
 | 1317 | #endif | 
 | 1318 |  | 
| Lennert Buytenhek | 396138f0 | 2008-10-07 13:46:07 +0000 | [diff] [blame] | 1319 | static inline bool netdev_uses_trailer_tags(struct net_device *dev) | 
 | 1320 | { | 
 | 1321 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | 
 | 1322 | 	if (dev->dsa_ptr != NULL) | 
 | 1323 | 		return dsa_uses_trailer_tags(dev->dsa_ptr); | 
 | 1324 | #endif | 
 | 1325 |  | 
 | 1326 | 	return 0; | 
 | 1327 | } | 
 | 1328 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1329 | /** | 
 | 1330 |  *	netdev_priv - access network device private data | 
 | 1331 |  *	@dev: network device | 
 | 1332 |  * | 
 | 1333 |  * Get network device private data | 
 | 1334 |  */ | 
| Patrick McHardy | 6472ce6 | 2007-06-13 12:03:21 -0700 | [diff] [blame] | 1335 | static inline void *netdev_priv(const struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | { | 
| Eric Dumazet | 1ce8e7b | 2009-05-27 04:42:37 +0000 | [diff] [blame] | 1337 | 	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | } | 
 | 1339 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1340 | /* Set the sysfs physical device reference for the network logical device | 
 | 1341 |  * if set prior to registration will cause a symlink during initialization. | 
 | 1342 |  */ | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1343 | #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 |  | 
| Marcel Holtmann | 384912e | 2009-08-31 21:08:19 +0000 | [diff] [blame] | 1345 | /* Set the sysfs device type for the network logical device to allow | 
 | 1346 |  * fin grained indentification of different network device types. For | 
 | 1347 |  * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. | 
 | 1348 |  */ | 
 | 1349 | #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype)) | 
 | 1350 |  | 
| Stephen Hemminger | 3b582cc | 2007-11-01 02:21:47 -0700 | [diff] [blame] | 1351 | /** | 
 | 1352 |  *	netif_napi_add - initialize a napi context | 
 | 1353 |  *	@dev:  network device | 
 | 1354 |  *	@napi: napi context | 
 | 1355 |  *	@poll: polling function | 
 | 1356 |  *	@weight: default weight | 
 | 1357 |  * | 
 | 1358 |  * netif_napi_add() must be used to initialize a napi context prior to calling | 
 | 1359 |  * *any* of the other napi related functions. | 
 | 1360 |  */ | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1361 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | 
 | 1362 | 		    int (*poll)(struct napi_struct *, int), int weight); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1363 |  | 
| Alexander Duyck | d815653 | 2008-07-08 15:13:05 -0700 | [diff] [blame] | 1364 | /** | 
 | 1365 |  *  netif_napi_del - remove a napi context | 
 | 1366 |  *  @napi: napi context | 
 | 1367 |  * | 
 | 1368 |  *  netif_napi_del() removes a napi context from the network device napi list | 
 | 1369 |  */ | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1370 | void netif_napi_del(struct napi_struct *napi); | 
 | 1371 |  | 
 | 1372 | struct napi_gro_cb { | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 1373 | 	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ | 
 | 1374 | 	void *frag0; | 
 | 1375 |  | 
| Herbert Xu | 7489594 | 2009-05-26 18:50:27 +0000 | [diff] [blame] | 1376 | 	/* Length of frag0. */ | 
 | 1377 | 	unsigned int frag0_len; | 
 | 1378 |  | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1379 | 	/* This indicates where we are processing relative to skb->data. */ | 
 | 1380 | 	int data_offset; | 
 | 1381 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1382 | 	/* This is non-zero if the packet may be of the same flow. */ | 
 | 1383 | 	int same_flow; | 
 | 1384 |  | 
 | 1385 | 	/* This is non-zero if the packet cannot be merged with the new skb. */ | 
 | 1386 | 	int flush; | 
 | 1387 |  | 
 | 1388 | 	/* Number of segments aggregated. */ | 
 | 1389 | 	int count; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 1390 |  | 
 | 1391 | 	/* Free the skb? */ | 
 | 1392 | 	int free; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1393 | }; | 
 | 1394 |  | 
 | 1395 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | 
| Alexander Duyck | d815653 | 2008-07-08 15:13:05 -0700 | [diff] [blame] | 1396 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | struct packet_type { | 
| David S. Miller | f2ccd8f | 2005-08-09 19:34:12 -0700 | [diff] [blame] | 1398 | 	__be16			type;	/* This is really htons(ether_type). */ | 
 | 1399 | 	struct net_device	*dev;	/* NULL is wildcarded here	     */ | 
 | 1400 | 	int			(*func) (struct sk_buff *, | 
 | 1401 | 					 struct net_device *, | 
 | 1402 | 					 struct packet_type *, | 
 | 1403 | 					 struct net_device *); | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 1404 | 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb, | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 1405 | 						u32 features); | 
| Herbert Xu | a430a43 | 2006-07-08 13:34:56 -0700 | [diff] [blame] | 1406 | 	int			(*gso_send_check)(struct sk_buff *skb); | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1407 | 	struct sk_buff		**(*gro_receive)(struct sk_buff **head, | 
 | 1408 | 					       struct sk_buff *skb); | 
 | 1409 | 	int			(*gro_complete)(struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 | 	void			*af_packet_priv; | 
 | 1411 | 	struct list_head	list; | 
 | 1412 | }; | 
 | 1413 |  | 
 | 1414 | #include <linux/interrupt.h> | 
 | 1415 | #include <linux/notifier.h> | 
 | 1416 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 | extern rwlock_t				dev_base_lock;		/* Device list lock */ | 
 | 1418 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1419 |  | 
 | 1420 | #define for_each_netdev(net, d)		\ | 
 | 1421 | 		list_for_each_entry(d, &(net)->dev_base_head, dev_list) | 
| Eric W. Biederman | dcbccbd4 | 2009-11-29 22:25:26 +0000 | [diff] [blame] | 1422 | #define for_each_netdev_reverse(net, d)	\ | 
 | 1423 | 		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 1424 | #define for_each_netdev_rcu(net, d)		\ | 
 | 1425 | 		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1426 | #define for_each_netdev_safe(net, d, n)	\ | 
 | 1427 | 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | 
 | 1428 | #define for_each_netdev_continue(net, d)		\ | 
 | 1429 | 		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | 
| stephen hemminger | 254245d | 2009-11-10 07:54:47 +0000 | [diff] [blame] | 1430 | #define for_each_netdev_continue_rcu(net, d)		\ | 
 | 1431 | 	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1432 | #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list) | 
 | 1433 |  | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 1434 | static inline struct net_device *next_net_device(struct net_device *dev) | 
 | 1435 | { | 
 | 1436 | 	struct list_head *lh; | 
 | 1437 | 	struct net *net; | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1438 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1439 | 	net = dev_net(dev); | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 1440 | 	lh = dev->dev_list.next; | 
 | 1441 | 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 
 | 1442 | } | 
 | 1443 |  | 
| Eric Dumazet | ce81b76 | 2009-11-11 17:34:30 +0000 | [diff] [blame] | 1444 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) | 
 | 1445 | { | 
 | 1446 | 	struct list_head *lh; | 
 | 1447 | 	struct net *net; | 
 | 1448 |  | 
 | 1449 | 	net = dev_net(dev); | 
| Eric Dumazet | ccf4343 | 2011-01-26 18:08:02 +0000 | [diff] [blame^] | 1450 | 	lh = rcu_dereference(list_next_rcu(&dev->dev_list)); | 
| Eric Dumazet | ce81b76 | 2009-11-11 17:34:30 +0000 | [diff] [blame] | 1451 | 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 
 | 1452 | } | 
 | 1453 |  | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 1454 | static inline struct net_device *first_net_device(struct net *net) | 
 | 1455 | { | 
 | 1456 | 	return list_empty(&net->dev_base_head) ? NULL : | 
 | 1457 | 		net_device_entry(net->dev_base_head.next); | 
 | 1458 | } | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1459 |  | 
| Eric Dumazet | ccf4343 | 2011-01-26 18:08:02 +0000 | [diff] [blame^] | 1460 | static inline struct net_device *first_net_device_rcu(struct net *net) | 
 | 1461 | { | 
 | 1462 | 	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | 
 | 1463 |  | 
 | 1464 | 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 
 | 1465 | } | 
 | 1466 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | extern int 			netdev_boot_setup_check(struct net_device *dev); | 
 | 1468 | extern unsigned long		netdev_boot_base(const char *prefix, int unit); | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 1469 | extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | 
 | 1470 | 					      const char *hwaddr); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1471 | extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | 
 | 1472 | extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | extern void		dev_add_pack(struct packet_type *pt); | 
 | 1474 | extern void		dev_remove_pack(struct packet_type *pt); | 
 | 1475 | extern void		__dev_remove_pack(struct packet_type *pt); | 
 | 1476 |  | 
| Eric Dumazet | bb69ae0 | 2010-06-07 11:42:13 +0000 | [diff] [blame] | 1477 | extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags, | 
 | 1478 | 						      unsigned short mask); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1479 | extern struct net_device	*dev_get_by_name(struct net *net, const char *name); | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 1480 | extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1481 | extern struct net_device	*__dev_get_by_name(struct net *net, const char *name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | extern int		dev_alloc_name(struct net_device *dev, const char *name); | 
 | 1483 | extern int		dev_open(struct net_device *dev); | 
 | 1484 | extern int		dev_close(struct net_device *dev); | 
| Ben Hutchings | 0187bdf | 2008-06-19 16:15:47 -0700 | [diff] [blame] | 1485 | extern void		dev_disable_lro(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1486 | extern int		dev_queue_xmit(struct sk_buff *skb); | 
 | 1487 | extern int		register_netdevice(struct net_device *dev); | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 1488 | extern void		unregister_netdevice_queue(struct net_device *dev, | 
 | 1489 | 						   struct list_head *head); | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 1490 | extern void		unregister_netdevice_many(struct list_head *head); | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 1491 | static inline void unregister_netdevice(struct net_device *dev) | 
 | 1492 | { | 
 | 1493 | 	unregister_netdevice_queue(dev, NULL); | 
 | 1494 | } | 
 | 1495 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 1496 | extern int 		netdev_refcnt_read(const struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1497 | extern void		free_netdev(struct net_device *dev); | 
 | 1498 | extern void		synchronize_net(void); | 
 | 1499 | extern int 		register_netdevice_notifier(struct notifier_block *nb); | 
 | 1500 | extern int		unregister_netdevice_notifier(struct notifier_block *nb); | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 1501 | extern int		init_dummy_netdev(struct net_device *dev); | 
| David S. Miller | 9d40bbd | 2009-03-04 23:46:25 -0800 | [diff] [blame] | 1502 | extern void		netdev_resync_ops(struct net_device *dev); | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 1503 |  | 
| Eric W. Biederman | ad7379d | 2007-09-16 15:33:32 -0700 | [diff] [blame] | 1504 | extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1505 | extern struct net_device	*dev_get_by_index(struct net *net, int ifindex); | 
 | 1506 | extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex); | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 1507 | extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | extern int		dev_restart(struct net_device *dev); | 
 | 1509 | #ifdef CONFIG_NETPOLL_TRAP | 
 | 1510 | extern int		netpoll_trap(void); | 
 | 1511 | #endif | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1512 | extern int	       skb_gro_receive(struct sk_buff **head, | 
 | 1513 | 				       struct sk_buff *skb); | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 1514 | extern void	       skb_gro_reset_offset(struct sk_buff *skb); | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1515 |  | 
 | 1516 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | 
 | 1517 | { | 
 | 1518 | 	return NAPI_GRO_CB(skb)->data_offset; | 
 | 1519 | } | 
 | 1520 |  | 
 | 1521 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | 
 | 1522 | { | 
 | 1523 | 	return skb->len - NAPI_GRO_CB(skb)->data_offset; | 
 | 1524 | } | 
 | 1525 |  | 
 | 1526 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | 
 | 1527 | { | 
 | 1528 | 	NAPI_GRO_CB(skb)->data_offset += len; | 
 | 1529 | } | 
 | 1530 |  | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 1531 | static inline void *skb_gro_header_fast(struct sk_buff *skb, | 
 | 1532 | 					unsigned int offset) | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1533 | { | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 1534 | 	return NAPI_GRO_CB(skb)->frag0 + offset; | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1535 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 |  | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 1537 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) | 
 | 1538 | { | 
 | 1539 | 	return NAPI_GRO_CB(skb)->frag0_len < hlen; | 
 | 1540 | } | 
 | 1541 |  | 
 | 1542 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, | 
 | 1543 | 					unsigned int offset) | 
 | 1544 | { | 
 | 1545 | 	NAPI_GRO_CB(skb)->frag0 = NULL; | 
 | 1546 | 	NAPI_GRO_CB(skb)->frag0_len = 0; | 
 | 1547 | 	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL; | 
 | 1548 | } | 
 | 1549 |  | 
| Herbert Xu | aa4b9f5 | 2009-02-08 18:00:37 +0000 | [diff] [blame] | 1550 | static inline void *skb_gro_mac_header(struct sk_buff *skb) | 
 | 1551 | { | 
| Herbert Xu | 78d3fd0 | 2009-05-26 18:50:23 +0000 | [diff] [blame] | 1552 | 	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); | 
| Herbert Xu | aa4b9f5 | 2009-02-08 18:00:37 +0000 | [diff] [blame] | 1553 | } | 
 | 1554 |  | 
| Herbert Xu | 36e7b1b | 2009-04-27 05:44:45 -0700 | [diff] [blame] | 1555 | static inline void *skb_gro_network_header(struct sk_buff *skb) | 
 | 1556 | { | 
| Herbert Xu | 78d3fd0 | 2009-05-26 18:50:23 +0000 | [diff] [blame] | 1557 | 	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + | 
 | 1558 | 	       skb_network_offset(skb); | 
| Herbert Xu | 36e7b1b | 2009-04-27 05:44:45 -0700 | [diff] [blame] | 1559 | } | 
 | 1560 |  | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1561 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, | 
 | 1562 | 				  unsigned short type, | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1563 | 				  const void *daddr, const void *saddr, | 
 | 1564 | 				  unsigned len) | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1565 | { | 
| Ursula Braun | f1ecfd5 | 2007-10-22 16:16:14 +0200 | [diff] [blame] | 1566 | 	if (!dev->header_ops || !dev->header_ops->create) | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1567 | 		return 0; | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1568 |  | 
 | 1569 | 	return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1570 | } | 
 | 1571 |  | 
| Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 1572 | static inline int dev_parse_header(const struct sk_buff *skb, | 
 | 1573 | 				   unsigned char *haddr) | 
 | 1574 | { | 
 | 1575 | 	const struct net_device *dev = skb->dev; | 
 | 1576 |  | 
| Patrick McHardy | 1b83336 | 2007-10-18 05:09:28 -0700 | [diff] [blame] | 1577 | 	if (!dev->header_ops || !dev->header_ops->parse) | 
| Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 1578 | 		return 0; | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1579 | 	return dev->header_ops->parse(skb, haddr); | 
| Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 1580 | } | 
 | 1581 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); | 
 | 1583 | extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf); | 
 | 1584 | static inline int unregister_gifconf(unsigned int family) | 
 | 1585 | { | 
 | 1586 | 	return register_gifconf(family, NULL); | 
 | 1587 | } | 
 | 1588 |  | 
 | 1589 | /* | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 1590 |  * Incoming packets are placed on per-cpu queues | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 |  */ | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 1592 | struct softnet_data { | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1593 | 	struct Qdisc		*output_queue; | 
| Changli Gao | a9cbd58 | 2010-04-26 23:06:24 +0000 | [diff] [blame] | 1594 | 	struct Qdisc		**output_queue_tailp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | 	struct list_head	poll_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | 	struct sk_buff		*completion_queue; | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 1597 | 	struct sk_buff_head	process_queue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 |  | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 1599 | 	/* stats */ | 
| David S. Miller | cd7b539 | 2010-05-02 22:27:59 -0700 | [diff] [blame] | 1600 | 	unsigned int		processed; | 
 | 1601 | 	unsigned int		time_squeeze; | 
 | 1602 | 	unsigned int		cpu_collision; | 
 | 1603 | 	unsigned int		received_rps; | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 1604 |  | 
| Changli Gao | fd793d8 | 2010-04-15 00:16:59 -0700 | [diff] [blame] | 1605 | #ifdef CONFIG_RPS | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 1606 | 	struct softnet_data	*rps_ipi_list; | 
 | 1607 |  | 
 | 1608 | 	/* Elements below can be accessed between CPUs for RPS */ | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1609 | 	struct call_single_data	csd ____cacheline_aligned_in_smp; | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 1610 | 	struct softnet_data	*rps_ipi_next; | 
 | 1611 | 	unsigned int		cpu; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 1612 | 	unsigned int		input_queue_head; | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 1613 | 	unsigned int		input_queue_tail; | 
| Tom Herbert | 1e94d72 | 2010-03-18 17:45:44 -0700 | [diff] [blame] | 1614 | #endif | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 1615 | 	unsigned		dropped; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1616 | 	struct sk_buff_head	input_pkt_queue; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1617 | 	struct napi_struct	backlog; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | }; | 
 | 1619 |  | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 1620 | static inline void input_queue_head_incr(struct softnet_data *sd) | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 1621 | { | 
 | 1622 | #ifdef CONFIG_RPS | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 1623 | 	sd->input_queue_head++; | 
 | 1624 | #endif | 
 | 1625 | } | 
 | 1626 |  | 
 | 1627 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | 
 | 1628 | 					      unsigned int *qtail) | 
 | 1629 | { | 
 | 1630 | #ifdef CONFIG_RPS | 
 | 1631 | 	*qtail = ++sd->input_queue_tail; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 1632 | #endif | 
 | 1633 | } | 
 | 1634 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1635 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 |  | 
 | 1637 | #define HAVE_NETIF_QUEUE | 
 | 1638 |  | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1639 | extern void __netif_schedule(struct Qdisc *q); | 
| David S. Miller | 86d804e | 2008-07-08 23:11:25 -0700 | [diff] [blame] | 1640 |  | 
 | 1641 | static inline void netif_schedule_queue(struct netdev_queue *txq) | 
 | 1642 | { | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1643 | 	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1644 | 		__netif_schedule(txq->qdisc); | 
| David S. Miller | 86d804e | 2008-07-08 23:11:25 -0700 | [diff] [blame] | 1645 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1647 | static inline void netif_tx_schedule_all(struct net_device *dev) | 
 | 1648 | { | 
 | 1649 | 	unsigned int i; | 
 | 1650 |  | 
 | 1651 | 	for (i = 0; i < dev->num_tx_queues; i++) | 
 | 1652 | 		netif_schedule_queue(netdev_get_tx_queue(dev, i)); | 
 | 1653 | } | 
 | 1654 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1655 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) | 
 | 1656 | { | 
 | 1657 | 	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 
 | 1658 | } | 
 | 1659 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1660 | /** | 
 | 1661 |  *	netif_start_queue - allow transmit | 
 | 1662 |  *	@dev: network device | 
 | 1663 |  * | 
 | 1664 |  *	Allow upper layers to call the device hard_start_xmit routine. | 
 | 1665 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | static inline void netif_start_queue(struct net_device *dev) | 
 | 1667 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1668 | 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1669 | } | 
 | 1670 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1671 | static inline void netif_tx_start_all_queues(struct net_device *dev) | 
 | 1672 | { | 
 | 1673 | 	unsigned int i; | 
 | 1674 |  | 
 | 1675 | 	for (i = 0; i < dev->num_tx_queues; i++) { | 
 | 1676 | 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
 | 1677 | 		netif_tx_start_queue(txq); | 
 | 1678 | 	} | 
 | 1679 | } | 
 | 1680 |  | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1681 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1682 | { | 
 | 1683 | #ifdef CONFIG_NETPOLL_TRAP | 
| Sergei Shtylyov | 5f286e1 | 2007-04-28 20:57:37 -0700 | [diff] [blame] | 1684 | 	if (netpoll_trap()) { | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 1685 | 		netif_tx_start_queue(dev_queue); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | 		return; | 
| Sergei Shtylyov | 5f286e1 | 2007-04-28 20:57:37 -0700 | [diff] [blame] | 1687 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | #endif | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1689 | 	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1690 | 		__netif_schedule(dev_queue->qdisc); | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1691 | } | 
 | 1692 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1693 | /** | 
 | 1694 |  *	netif_wake_queue - restart transmit | 
 | 1695 |  *	@dev: network device | 
 | 1696 |  * | 
 | 1697 |  *	Allow upper layers to call the device hard_start_xmit routine. | 
 | 1698 |  *	Used for flow control when transmit resources are available. | 
 | 1699 |  */ | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1700 | static inline void netif_wake_queue(struct net_device *dev) | 
 | 1701 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1702 | 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | } | 
 | 1704 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1705 | static inline void netif_tx_wake_all_queues(struct net_device *dev) | 
 | 1706 | { | 
 | 1707 | 	unsigned int i; | 
 | 1708 |  | 
 | 1709 | 	for (i = 0; i < dev->num_tx_queues; i++) { | 
 | 1710 | 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
 | 1711 | 		netif_tx_wake_queue(txq); | 
 | 1712 | 	} | 
 | 1713 | } | 
 | 1714 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1715 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | 
 | 1716 | { | 
| Guillaume Chazarain | 18543a6 | 2010-11-06 06:39:32 +0000 | [diff] [blame] | 1717 | 	if (WARN_ON(!dev_queue)) { | 
 | 1718 | 		printk(KERN_INFO "netif_stop_queue() cannot be called before " | 
 | 1719 | 		       "register_netdev()"); | 
 | 1720 | 		return; | 
 | 1721 | 	} | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1722 | 	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 
 | 1723 | } | 
 | 1724 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1725 | /** | 
 | 1726 |  *	netif_stop_queue - stop transmitted packets | 
 | 1727 |  *	@dev: network device | 
 | 1728 |  * | 
 | 1729 |  *	Stop upper layers calling the device hard_start_xmit routine. | 
 | 1730 |  *	Used for flow control when transmit resources are unavailable. | 
 | 1731 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 | static inline void netif_stop_queue(struct net_device *dev) | 
 | 1733 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1734 | 	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | } | 
 | 1736 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1737 | static inline void netif_tx_stop_all_queues(struct net_device *dev) | 
 | 1738 | { | 
 | 1739 | 	unsigned int i; | 
 | 1740 |  | 
 | 1741 | 	for (i = 0; i < dev->num_tx_queues; i++) { | 
 | 1742 | 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
 | 1743 | 		netif_tx_stop_queue(txq); | 
 | 1744 | 	} | 
 | 1745 | } | 
 | 1746 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1747 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) | 
 | 1748 | { | 
 | 1749 | 	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | 
 | 1750 | } | 
 | 1751 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1752 | /** | 
 | 1753 |  *	netif_queue_stopped - test if transmit queue is flowblocked | 
 | 1754 |  *	@dev: network device | 
 | 1755 |  * | 
 | 1756 |  *	Test if transmit queue on device is currently unable to send. | 
 | 1757 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1758 | static inline int netif_queue_stopped(const struct net_device *dev) | 
 | 1759 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1760 | 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1761 | } | 
 | 1762 |  | 
| Eric Dumazet | 5a0d226 | 2010-11-23 10:42:02 +0000 | [diff] [blame] | 1763 | static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue) | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 1764 | { | 
| Eric Dumazet | 5a0d226 | 2010-11-23 10:42:02 +0000 | [diff] [blame] | 1765 | 	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN; | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 1766 | } | 
 | 1767 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1768 | /** | 
 | 1769 |  *	netif_running - test if up | 
 | 1770 |  *	@dev: network device | 
 | 1771 |  * | 
 | 1772 |  *	Test if the device has been brought up. | 
 | 1773 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1774 | static inline int netif_running(const struct net_device *dev) | 
 | 1775 | { | 
 | 1776 | 	return test_bit(__LINK_STATE_START, &dev->state); | 
 | 1777 | } | 
 | 1778 |  | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1779 | /* | 
 | 1780 |  * Routines to manage the subqueues on a device.  We only need start | 
 | 1781 |  * stop, and a check if it's stopped.  All other device management is | 
 | 1782 |  * done at the overall netdevice level. | 
 | 1783 |  * Also test the device if we're multiqueue. | 
 | 1784 |  */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1785 |  | 
 | 1786 | /** | 
 | 1787 |  *	netif_start_subqueue - allow sending packets on subqueue | 
 | 1788 |  *	@dev: network device | 
 | 1789 |  *	@queue_index: sub queue index | 
 | 1790 |  * | 
 | 1791 |  * Start individual transmit queue of a device with multiple transmit queues. | 
 | 1792 |  */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1793 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) | 
 | 1794 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1795 | 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 1796 |  | 
 | 1797 | 	netif_tx_start_queue(txq); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1798 | } | 
 | 1799 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1800 | /** | 
 | 1801 |  *	netif_stop_subqueue - stop sending packets on subqueue | 
 | 1802 |  *	@dev: network device | 
 | 1803 |  *	@queue_index: sub queue index | 
 | 1804 |  * | 
 | 1805 |  * Stop individual transmit queue of a device with multiple transmit queues. | 
 | 1806 |  */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1807 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) | 
 | 1808 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1809 | 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1810 | #ifdef CONFIG_NETPOLL_TRAP | 
 | 1811 | 	if (netpoll_trap()) | 
 | 1812 | 		return; | 
 | 1813 | #endif | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 1814 | 	netif_tx_stop_queue(txq); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1815 | } | 
 | 1816 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1817 | /** | 
 | 1818 |  *	netif_subqueue_stopped - test status of subqueue | 
 | 1819 |  *	@dev: network device | 
 | 1820 |  *	@queue_index: sub queue index | 
 | 1821 |  * | 
 | 1822 |  * Check individual transmit queue of a device with multiple transmit queues. | 
 | 1823 |  */ | 
| Pavel Emelyanov | 668f895 | 2007-10-21 17:01:56 -0700 | [diff] [blame] | 1824 | static inline int __netif_subqueue_stopped(const struct net_device *dev, | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1825 | 					 u16 queue_index) | 
 | 1826 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1827 | 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 1828 |  | 
 | 1829 | 	return netif_tx_queue_stopped(txq); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1830 | } | 
 | 1831 |  | 
| Pavel Emelyanov | 668f895 | 2007-10-21 17:01:56 -0700 | [diff] [blame] | 1832 | static inline int netif_subqueue_stopped(const struct net_device *dev, | 
 | 1833 | 					 struct sk_buff *skb) | 
 | 1834 | { | 
 | 1835 | 	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | 
 | 1836 | } | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1837 |  | 
 | 1838 | /** | 
 | 1839 |  *	netif_wake_subqueue - allow sending packets on subqueue | 
 | 1840 |  *	@dev: network device | 
 | 1841 |  *	@queue_index: sub queue index | 
 | 1842 |  * | 
 | 1843 |  * Resume individual transmit queue of a device with multiple transmit queues. | 
 | 1844 |  */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1845 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | 
 | 1846 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1847 | 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1848 | #ifdef CONFIG_NETPOLL_TRAP | 
 | 1849 | 	if (netpoll_trap()) | 
 | 1850 | 		return; | 
 | 1851 | #endif | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1852 | 	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1853 | 		__netif_schedule(txq->qdisc); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1854 | } | 
 | 1855 |  | 
| Vladislav Zolotarov | a3d22a6 | 2010-12-13 06:27:10 +0000 | [diff] [blame] | 1856 | /* | 
 | 1857 |  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used | 
 | 1858 |  * as a distribution range limit for the returned value. | 
 | 1859 |  */ | 
 | 1860 | static inline u16 skb_tx_hash(const struct net_device *dev, | 
 | 1861 | 			      const struct sk_buff *skb) | 
 | 1862 | { | 
 | 1863 | 	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); | 
 | 1864 | } | 
 | 1865 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1866 | /** | 
 | 1867 |  *	netif_is_multiqueue - test if device has multiple transmit queues | 
 | 1868 |  *	@dev: network device | 
 | 1869 |  * | 
 | 1870 |  * Check if device has multiple transmit queues | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1871 |  */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1872 | static inline int netif_is_multiqueue(const struct net_device *dev) | 
 | 1873 | { | 
| Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 1874 | 	return dev->num_tx_queues > 1; | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1875 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 1877 | extern int netif_set_real_num_tx_queues(struct net_device *dev, | 
 | 1878 | 					unsigned int txq); | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 1879 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1880 | #ifdef CONFIG_RPS | 
 | 1881 | extern int netif_set_real_num_rx_queues(struct net_device *dev, | 
 | 1882 | 					unsigned int rxq); | 
 | 1883 | #else | 
 | 1884 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | 
 | 1885 | 						unsigned int rxq) | 
 | 1886 | { | 
 | 1887 | 	return 0; | 
 | 1888 | } | 
 | 1889 | #endif | 
 | 1890 |  | 
| Ben Hutchings | 3171d02 | 2010-09-27 08:24:49 +0000 | [diff] [blame] | 1891 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, | 
 | 1892 | 					     const struct net_device *from_dev) | 
 | 1893 | { | 
 | 1894 | 	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues); | 
 | 1895 | #ifdef CONFIG_RPS | 
 | 1896 | 	return netif_set_real_num_rx_queues(to_dev, | 
 | 1897 | 					    from_dev->real_num_rx_queues); | 
 | 1898 | #else | 
 | 1899 | 	return 0; | 
 | 1900 | #endif | 
 | 1901 | } | 
 | 1902 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1903 | /* Use this variant when it is known for sure that it | 
| Matti Linnanvuori | 0ef4730 | 2008-03-28 16:33:00 -0700 | [diff] [blame] | 1904 |  * is executing from hardware interrupt context or with hardware interrupts | 
 | 1905 |  * disabled. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 |  */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1907 | extern void dev_kfree_skb_irq(struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1908 |  | 
 | 1909 | /* Use this variant in places where it could be invoked | 
| Matti Linnanvuori | 0ef4730 | 2008-03-28 16:33:00 -0700 | [diff] [blame] | 1910 |  * from either hardware interrupt or other context, with hardware interrupts | 
 | 1911 |  * either disabled or enabled. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 |  */ | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 1913 | extern void dev_kfree_skb_any(struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 |  | 
 | 1915 | #define HAVE_NETIF_RX 1 | 
 | 1916 | extern int		netif_rx(struct sk_buff *skb); | 
 | 1917 | extern int		netif_rx_ni(struct sk_buff *skb); | 
 | 1918 | #define HAVE_NETIF_RECEIVE_SKB 1 | 
 | 1919 | extern int		netif_receive_skb(struct sk_buff *skb); | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 1920 | extern gro_result_t	dev_gro_receive(struct napi_struct *napi, | 
| Herbert Xu | 96e93ea | 2009-01-06 10:49:34 -0800 | [diff] [blame] | 1921 | 					struct sk_buff *skb); | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 1922 | extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb); | 
 | 1923 | extern gro_result_t	napi_gro_receive(struct napi_struct *napi, | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1924 | 					 struct sk_buff *skb); | 
| Eric Dumazet | 86cac58 | 2010-08-31 18:25:32 +0000 | [diff] [blame] | 1925 | extern void		napi_gro_flush(struct napi_struct *napi); | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 1926 | extern struct sk_buff *	napi_get_frags(struct napi_struct *napi); | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 1927 | extern gro_result_t	napi_frags_finish(struct napi_struct *napi, | 
| Ben Hutchings | 5b252f0 | 2009-10-29 07:17:09 +0000 | [diff] [blame] | 1928 | 					  struct sk_buff *skb, | 
 | 1929 | 					  gro_result_t ret); | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 1930 | extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi); | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 1931 | extern gro_result_t	napi_gro_frags(struct napi_struct *napi); | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 1932 |  | 
 | 1933 | static inline void napi_free_frags(struct napi_struct *napi) | 
 | 1934 | { | 
 | 1935 | 	kfree_skb(napi->skb); | 
 | 1936 | 	napi->skb = NULL; | 
 | 1937 | } | 
 | 1938 |  | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 1939 | extern int netdev_rx_handler_register(struct net_device *dev, | 
| Jiri Pirko | 93e2c32 | 2010-06-10 03:34:59 +0000 | [diff] [blame] | 1940 | 				      rx_handler_func_t *rx_handler, | 
 | 1941 | 				      void *rx_handler_data); | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 1942 | extern void netdev_rx_handler_unregister(struct net_device *dev); | 
 | 1943 |  | 
| Mitch Williams | c2373ee | 2005-11-09 10:34:45 -0800 | [diff] [blame] | 1944 | extern int		dev_valid_name(const char *name); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1945 | extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *); | 
 | 1946 | extern int		dev_ethtool(struct net *net, struct ifreq *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1947 | extern unsigned		dev_get_flags(const struct net_device *); | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1948 | extern int		__dev_change_flags(struct net_device *, unsigned int flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | extern int		dev_change_flags(struct net_device *, unsigned); | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 1950 | extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags); | 
| Stephen Hemminger | cf04a4c7 | 2008-09-30 02:22:14 -0700 | [diff] [blame] | 1951 | extern int		dev_change_name(struct net_device *, const char *); | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 1952 | extern int		dev_set_alias(struct net_device *, const char *, size_t); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 1953 | extern int		dev_change_net_namespace(struct net_device *, | 
 | 1954 | 						 struct net *, const char *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 | extern int		dev_set_mtu(struct net_device *, int); | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 1956 | extern void		dev_set_group(struct net_device *, int); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1957 | extern int		dev_set_mac_address(struct net_device *, | 
 | 1958 | 					    struct sockaddr *); | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 1959 | extern int		dev_hard_start_xmit(struct sk_buff *skb, | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1960 | 					    struct net_device *dev, | 
 | 1961 | 					    struct netdev_queue *txq); | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 1962 | extern int		dev_forward_skb(struct net_device *dev, | 
 | 1963 | 					struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1964 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1965 | extern int		netdev_budget; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1966 |  | 
 | 1967 | /* Called by rtnetlink.c:rtnl_unlock() */ | 
 | 1968 | extern void netdev_run_todo(void); | 
 | 1969 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1970 | /** | 
 | 1971 |  *	dev_put - release reference to device | 
 | 1972 |  *	@dev: network device | 
 | 1973 |  * | 
| Benjamin Thery | 9ef4429 | 2007-10-10 21:18:17 -0700 | [diff] [blame] | 1974 |  * Release reference to device to allow it to be freed. | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1975 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 | static inline void dev_put(struct net_device *dev) | 
 | 1977 | { | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 1978 | 	irqsafe_cpu_dec(*dev->pcpu_refcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1979 | } | 
 | 1980 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1981 | /** | 
 | 1982 |  *	dev_hold - get reference to device | 
 | 1983 |  *	@dev: network device | 
 | 1984 |  * | 
| Benjamin Thery | 9ef4429 | 2007-10-10 21:18:17 -0700 | [diff] [blame] | 1985 |  * Hold reference to device to keep it from being freed. | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1986 |  */ | 
| Stephen Hemminger | 1533306 | 2006-03-20 22:32:28 -0800 | [diff] [blame] | 1987 | static inline void dev_hold(struct net_device *dev) | 
 | 1988 | { | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 1989 | 	irqsafe_cpu_inc(*dev->pcpu_refcnt); | 
| Stephen Hemminger | 1533306 | 2006-03-20 22:32:28 -0800 | [diff] [blame] | 1990 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 |  | 
 | 1992 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | 
 | 1993 |  * and _off may be called from IRQ context, but it is caller | 
 | 1994 |  * who is responsible for serialization of these calls. | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 1995 |  * | 
 | 1996 |  * The name carrier is inappropriate, these functions should really be | 
 | 1997 |  * called netif_lowerlayer_*() because they represent the state of any | 
 | 1998 |  * kind of lower layer not just hardware media. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 |  */ | 
 | 2000 |  | 
 | 2001 | extern void linkwatch_fire_event(struct net_device *dev); | 
| Eric Dumazet | e014deb | 2009-11-17 05:59:21 +0000 | [diff] [blame] | 2002 | extern void linkwatch_forget_dev(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2003 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2004 | /** | 
 | 2005 |  *	netif_carrier_ok - test if carrier present | 
 | 2006 |  *	@dev: network device | 
 | 2007 |  * | 
 | 2008 |  * Check if carrier is present on device | 
 | 2009 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2010 | static inline int netif_carrier_ok(const struct net_device *dev) | 
 | 2011 | { | 
 | 2012 | 	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | 
 | 2013 | } | 
 | 2014 |  | 
| Eric Dumazet | 9d21493 | 2009-05-17 20:55:16 -0700 | [diff] [blame] | 2015 | extern unsigned long dev_trans_start(struct net_device *dev); | 
 | 2016 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2017 | extern void __netdev_watchdog_up(struct net_device *dev); | 
 | 2018 |  | 
| Denis Vlasenko | 0a242ef | 2005-08-11 15:32:53 -0700 | [diff] [blame] | 2019 | extern void netif_carrier_on(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2020 |  | 
| Denis Vlasenko | 0a242ef | 2005-08-11 15:32:53 -0700 | [diff] [blame] | 2021 | extern void netif_carrier_off(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2022 |  | 
| Ian Campbell | 06c4648 | 2010-05-26 00:09:42 +0000 | [diff] [blame] | 2023 | extern void netif_notify_peers(struct net_device *dev); | 
 | 2024 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2025 | /** | 
 | 2026 |  *	netif_dormant_on - mark device as dormant. | 
 | 2027 |  *	@dev: network device | 
 | 2028 |  * | 
 | 2029 |  * Mark device as dormant (as per RFC2863). | 
 | 2030 |  * | 
 | 2031 |  * The dormant state indicates that the relevant interface is not | 
 | 2032 |  * actually in a condition to pass packets (i.e., it is not 'up') but is | 
 | 2033 |  * in a "pending" state, waiting for some external event.  For "on- | 
 | 2034 |  * demand" interfaces, this new state identifies the situation where the | 
 | 2035 |  * interface is waiting for events to place it in the up state. | 
 | 2036 |  * | 
 | 2037 |  */ | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2038 | static inline void netif_dormant_on(struct net_device *dev) | 
 | 2039 | { | 
 | 2040 | 	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | 
 | 2041 | 		linkwatch_fire_event(dev); | 
 | 2042 | } | 
 | 2043 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2044 | /** | 
 | 2045 |  *	netif_dormant_off - set device as not dormant. | 
 | 2046 |  *	@dev: network device | 
 | 2047 |  * | 
 | 2048 |  * Device is not in dormant state. | 
 | 2049 |  */ | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2050 | static inline void netif_dormant_off(struct net_device *dev) | 
 | 2051 | { | 
 | 2052 | 	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | 
 | 2053 | 		linkwatch_fire_event(dev); | 
 | 2054 | } | 
 | 2055 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2056 | /** | 
 | 2057 |  *	netif_dormant - test if carrier present | 
 | 2058 |  *	@dev: network device | 
 | 2059 |  * | 
 | 2060 |  * Check if carrier is present on device | 
 | 2061 |  */ | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2062 | static inline int netif_dormant(const struct net_device *dev) | 
 | 2063 | { | 
 | 2064 | 	return test_bit(__LINK_STATE_DORMANT, &dev->state); | 
 | 2065 | } | 
 | 2066 |  | 
 | 2067 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2068 | /** | 
 | 2069 |  *	netif_oper_up - test if device is operational | 
 | 2070 |  *	@dev: network device | 
 | 2071 |  * | 
 | 2072 |  * Check if carrier is operational | 
 | 2073 |  */ | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 2074 | static inline int netif_oper_up(const struct net_device *dev) | 
 | 2075 | { | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2076 | 	return (dev->operstate == IF_OPER_UP || | 
 | 2077 | 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | 
 | 2078 | } | 
 | 2079 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2080 | /** | 
 | 2081 |  *	netif_device_present - is device available or removed | 
 | 2082 |  *	@dev: network device | 
 | 2083 |  * | 
 | 2084 |  * Check if device has not been removed from system. | 
 | 2085 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2086 | static inline int netif_device_present(struct net_device *dev) | 
 | 2087 | { | 
 | 2088 | 	return test_bit(__LINK_STATE_PRESENT, &dev->state); | 
 | 2089 | } | 
 | 2090 |  | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 2091 | extern void netif_device_detach(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2092 |  | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 2093 | extern void netif_device_attach(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 |  | 
 | 2095 | /* | 
 | 2096 |  * Network interface message level settings | 
 | 2097 |  */ | 
 | 2098 | #define HAVE_NETIF_MSG 1 | 
 | 2099 |  | 
 | 2100 | enum { | 
 | 2101 | 	NETIF_MSG_DRV		= 0x0001, | 
 | 2102 | 	NETIF_MSG_PROBE		= 0x0002, | 
 | 2103 | 	NETIF_MSG_LINK		= 0x0004, | 
 | 2104 | 	NETIF_MSG_TIMER		= 0x0008, | 
 | 2105 | 	NETIF_MSG_IFDOWN	= 0x0010, | 
 | 2106 | 	NETIF_MSG_IFUP		= 0x0020, | 
 | 2107 | 	NETIF_MSG_RX_ERR	= 0x0040, | 
 | 2108 | 	NETIF_MSG_TX_ERR	= 0x0080, | 
 | 2109 | 	NETIF_MSG_TX_QUEUED	= 0x0100, | 
 | 2110 | 	NETIF_MSG_INTR		= 0x0200, | 
 | 2111 | 	NETIF_MSG_TX_DONE	= 0x0400, | 
 | 2112 | 	NETIF_MSG_RX_STATUS	= 0x0800, | 
 | 2113 | 	NETIF_MSG_PKTDATA	= 0x1000, | 
 | 2114 | 	NETIF_MSG_HW		= 0x2000, | 
 | 2115 | 	NETIF_MSG_WOL		= 0x4000, | 
 | 2116 | }; | 
 | 2117 |  | 
 | 2118 | #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV) | 
 | 2119 | #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE) | 
 | 2120 | #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK) | 
 | 2121 | #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER) | 
 | 2122 | #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN) | 
 | 2123 | #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP) | 
 | 2124 | #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR) | 
 | 2125 | #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR) | 
 | 2126 | #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED) | 
 | 2127 | #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR) | 
 | 2128 | #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE) | 
 | 2129 | #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS) | 
 | 2130 | #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA) | 
 | 2131 | #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW) | 
 | 2132 | #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL) | 
 | 2133 |  | 
 | 2134 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | 
 | 2135 | { | 
 | 2136 | 	/* use default */ | 
 | 2137 | 	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | 
 | 2138 | 		return default_msg_enable_bits; | 
 | 2139 | 	if (debug_value == 0)	/* no output */ | 
 | 2140 | 		return 0; | 
 | 2141 | 	/* set low N bits */ | 
 | 2142 | 	return (1 << debug_value) - 1; | 
 | 2143 | } | 
 | 2144 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2145 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2146 | { | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2147 | 	spin_lock(&txq->_xmit_lock); | 
 | 2148 | 	txq->xmit_lock_owner = cpu; | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2149 | } | 
 | 2150 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2151 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) | 
 | 2152 | { | 
 | 2153 | 	spin_lock_bh(&txq->_xmit_lock); | 
 | 2154 | 	txq->xmit_lock_owner = smp_processor_id(); | 
 | 2155 | } | 
 | 2156 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2157 | static inline int __netif_tx_trylock(struct netdev_queue *txq) | 
 | 2158 | { | 
 | 2159 | 	int ok = spin_trylock(&txq->_xmit_lock); | 
 | 2160 | 	if (likely(ok)) | 
 | 2161 | 		txq->xmit_lock_owner = smp_processor_id(); | 
 | 2162 | 	return ok; | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2163 | } | 
 | 2164 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2165 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | 
 | 2166 | { | 
 | 2167 | 	txq->xmit_lock_owner = -1; | 
 | 2168 | 	spin_unlock(&txq->_xmit_lock); | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2169 | } | 
 | 2170 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2171 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | 
 | 2172 | { | 
 | 2173 | 	txq->xmit_lock_owner = -1; | 
 | 2174 | 	spin_unlock_bh(&txq->_xmit_lock); | 
 | 2175 | } | 
 | 2176 |  | 
| Eric Dumazet | 08baf56 | 2009-05-25 22:58:01 -0700 | [diff] [blame] | 2177 | static inline void txq_trans_update(struct netdev_queue *txq) | 
 | 2178 | { | 
 | 2179 | 	if (txq->xmit_lock_owner != -1) | 
 | 2180 | 		txq->trans_start = jiffies; | 
 | 2181 | } | 
 | 2182 |  | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2183 | /** | 
 | 2184 |  *	netif_tx_lock - grab network device transmit lock | 
 | 2185 |  *	@dev: network device | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2186 |  * | 
 | 2187 |  * Get network device transmit lock | 
 | 2188 |  */ | 
 | 2189 | static inline void netif_tx_lock(struct net_device *dev) | 
 | 2190 | { | 
 | 2191 | 	unsigned int i; | 
 | 2192 | 	int cpu; | 
 | 2193 |  | 
 | 2194 | 	spin_lock(&dev->tx_global_lock); | 
 | 2195 | 	cpu = smp_processor_id(); | 
 | 2196 | 	for (i = 0; i < dev->num_tx_queues; i++) { | 
 | 2197 | 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
 | 2198 |  | 
 | 2199 | 		/* We are the only thread of execution doing a | 
 | 2200 | 		 * freeze, but we have to grab the _xmit_lock in | 
 | 2201 | 		 * order to synchronize with threads which are in | 
 | 2202 | 		 * the ->hard_start_xmit() handler and already | 
 | 2203 | 		 * checked the frozen bit. | 
 | 2204 | 		 */ | 
 | 2205 | 		__netif_tx_lock(txq, cpu); | 
 | 2206 | 		set_bit(__QUEUE_STATE_FROZEN, &txq->state); | 
 | 2207 | 		__netif_tx_unlock(txq); | 
 | 2208 | 	} | 
 | 2209 | } | 
 | 2210 |  | 
 | 2211 | static inline void netif_tx_lock_bh(struct net_device *dev) | 
 | 2212 | { | 
 | 2213 | 	local_bh_disable(); | 
 | 2214 | 	netif_tx_lock(dev); | 
 | 2215 | } | 
 | 2216 |  | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2217 | static inline void netif_tx_unlock(struct net_device *dev) | 
 | 2218 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2219 | 	unsigned int i; | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2220 |  | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2221 | 	for (i = 0; i < dev->num_tx_queues; i++) { | 
 | 2222 | 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2223 |  | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2224 | 		/* No need to grab the _xmit_lock here.  If the | 
 | 2225 | 		 * queue is not stopped for another reason, we | 
 | 2226 | 		 * force a schedule. | 
 | 2227 | 		 */ | 
 | 2228 | 		clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 2229 | 		netif_schedule_queue(txq); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2230 | 	} | 
 | 2231 | 	spin_unlock(&dev->tx_global_lock); | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2232 | } | 
 | 2233 |  | 
 | 2234 | static inline void netif_tx_unlock_bh(struct net_device *dev) | 
 | 2235 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2236 | 	netif_tx_unlock(dev); | 
 | 2237 | 	local_bh_enable(); | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2238 | } | 
 | 2239 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2240 | #define HARD_TX_LOCK(dev, txq, cpu) {			\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2241 | 	if ((dev->features & NETIF_F_LLTX) == 0) {	\ | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2242 | 		__netif_tx_lock(txq, cpu);		\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2243 | 	}						\ | 
 | 2244 | } | 
 | 2245 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2246 | #define HARD_TX_UNLOCK(dev, txq) {			\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2247 | 	if ((dev->features & NETIF_F_LLTX) == 0) {	\ | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2248 | 		__netif_tx_unlock(txq);			\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2249 | 	}						\ | 
 | 2250 | } | 
 | 2251 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2252 | static inline void netif_tx_disable(struct net_device *dev) | 
 | 2253 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2254 | 	unsigned int i; | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2255 | 	int cpu; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2256 |  | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2257 | 	local_bh_disable(); | 
 | 2258 | 	cpu = smp_processor_id(); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2259 | 	for (i = 0; i < dev->num_tx_queues; i++) { | 
 | 2260 | 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2261 |  | 
 | 2262 | 		__netif_tx_lock(txq, cpu); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2263 | 		netif_tx_stop_queue(txq); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2264 | 		__netif_tx_unlock(txq); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2265 | 	} | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2266 | 	local_bh_enable(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2267 | } | 
 | 2268 |  | 
| David S. Miller | e308a5d | 2008-07-15 00:13:44 -0700 | [diff] [blame] | 2269 | static inline void netif_addr_lock(struct net_device *dev) | 
 | 2270 | { | 
 | 2271 | 	spin_lock(&dev->addr_list_lock); | 
 | 2272 | } | 
 | 2273 |  | 
 | 2274 | static inline void netif_addr_lock_bh(struct net_device *dev) | 
 | 2275 | { | 
 | 2276 | 	spin_lock_bh(&dev->addr_list_lock); | 
 | 2277 | } | 
 | 2278 |  | 
 | 2279 | static inline void netif_addr_unlock(struct net_device *dev) | 
 | 2280 | { | 
 | 2281 | 	spin_unlock(&dev->addr_list_lock); | 
 | 2282 | } | 
 | 2283 |  | 
 | 2284 | static inline void netif_addr_unlock_bh(struct net_device *dev) | 
 | 2285 | { | 
 | 2286 | 	spin_unlock_bh(&dev->addr_list_lock); | 
 | 2287 | } | 
 | 2288 |  | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2289 | /* | 
| Jiri Pirko | 31278e7 | 2009-06-17 01:12:19 +0000 | [diff] [blame] | 2290 |  * dev_addrs walker. Should be used only for read access. Call with | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2291 |  * rcu_read_lock held. | 
 | 2292 |  */ | 
 | 2293 | #define for_each_dev_addr(dev, ha) \ | 
| Jiri Pirko | 31278e7 | 2009-06-17 01:12:19 +0000 | [diff] [blame] | 2294 | 		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2295 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2296 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ | 
 | 2297 |  | 
 | 2298 | extern void		ether_setup(struct net_device *dev); | 
 | 2299 |  | 
 | 2300 | /* Support for loadable net-drivers */ | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 2301 | extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2302 | 				       void (*setup)(struct net_device *), | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 2303 | 				       unsigned int txqs, unsigned int rxqs); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2304 | #define alloc_netdev(sizeof_priv, name, setup) \ | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 2305 | 	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) | 
 | 2306 |  | 
 | 2307 | #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ | 
 | 2308 | 	alloc_netdev_mqs(sizeof_priv, name, setup, count, count) | 
 | 2309 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2310 | extern int		register_netdev(struct net_device *dev); | 
 | 2311 | extern void		unregister_netdev(struct net_device *dev); | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2312 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 2313 | /* General hardware address lists handling functions */ | 
 | 2314 | extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | 
 | 2315 | 				  struct netdev_hw_addr_list *from_list, | 
 | 2316 | 				  int addr_len, unsigned char addr_type); | 
 | 2317 | extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | 
 | 2318 | 				   struct netdev_hw_addr_list *from_list, | 
 | 2319 | 				   int addr_len, unsigned char addr_type); | 
 | 2320 | extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | 
 | 2321 | 			  struct netdev_hw_addr_list *from_list, | 
 | 2322 | 			  int addr_len); | 
 | 2323 | extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | 
 | 2324 | 			     struct netdev_hw_addr_list *from_list, | 
 | 2325 | 			     int addr_len); | 
 | 2326 | extern void __hw_addr_flush(struct netdev_hw_addr_list *list); | 
 | 2327 | extern void __hw_addr_init(struct netdev_hw_addr_list *list); | 
 | 2328 |  | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2329 | /* Functions used for device addresses handling */ | 
 | 2330 | extern int dev_addr_add(struct net_device *dev, unsigned char *addr, | 
 | 2331 | 			unsigned char addr_type); | 
 | 2332 | extern int dev_addr_del(struct net_device *dev, unsigned char *addr, | 
 | 2333 | 			unsigned char addr_type); | 
 | 2334 | extern int dev_addr_add_multiple(struct net_device *to_dev, | 
 | 2335 | 				 struct net_device *from_dev, | 
 | 2336 | 				 unsigned char addr_type); | 
 | 2337 | extern int dev_addr_del_multiple(struct net_device *to_dev, | 
 | 2338 | 				 struct net_device *from_dev, | 
 | 2339 | 				 unsigned char addr_type); | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 2340 | extern void dev_addr_flush(struct net_device *dev); | 
 | 2341 | extern int dev_addr_init(struct net_device *dev); | 
 | 2342 |  | 
 | 2343 | /* Functions used for unicast addresses handling */ | 
 | 2344 | extern int dev_uc_add(struct net_device *dev, unsigned char *addr); | 
 | 2345 | extern int dev_uc_del(struct net_device *dev, unsigned char *addr); | 
 | 2346 | extern int dev_uc_sync(struct net_device *to, struct net_device *from); | 
 | 2347 | extern void dev_uc_unsync(struct net_device *to, struct net_device *from); | 
 | 2348 | extern void dev_uc_flush(struct net_device *dev); | 
 | 2349 | extern void dev_uc_init(struct net_device *dev); | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2350 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 2351 | /* Functions used for multicast addresses handling */ | 
 | 2352 | extern int dev_mc_add(struct net_device *dev, unsigned char *addr); | 
 | 2353 | extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); | 
 | 2354 | extern int dev_mc_del(struct net_device *dev, unsigned char *addr); | 
 | 2355 | extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); | 
 | 2356 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); | 
 | 2357 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | 
 | 2358 | extern void dev_mc_flush(struct net_device *dev); | 
 | 2359 | extern void dev_mc_init(struct net_device *dev); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2360 |  | 
 | 2361 | /* Functions used for secondary unicast and multicast support */ | 
 | 2362 | extern void		dev_set_rx_mode(struct net_device *dev); | 
 | 2363 | extern void		__dev_set_rx_mode(struct net_device *dev); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2364 | extern int		dev_set_promiscuity(struct net_device *dev, int inc); | 
 | 2365 | extern int		dev_set_allmulti(struct net_device *dev, int inc); | 
 | 2366 | extern void		netdev_state_change(struct net_device *dev); | 
| Jiri Pirko | 3ca5b40 | 2010-03-10 10:29:35 +0000 | [diff] [blame] | 2367 | extern int		netdev_bonding_change(struct net_device *dev, | 
| Moni Shoua | 75c7850 | 2009-09-15 02:37:40 -0700 | [diff] [blame] | 2368 | 					      unsigned long event); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2369 | extern void		netdev_features_change(struct net_device *dev); | 
 | 2370 | /* Load a device via the kmod */ | 
 | 2371 | extern void		dev_load(struct net *net, const char *name); | 
 | 2372 | extern void		dev_mcast_init(void); | 
| Ben Hutchings | d775351 | 2010-07-09 09:12:41 +0000 | [diff] [blame] | 2373 | extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | 
 | 2374 | 					       struct rtnl_link_stats64 *storage); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2375 |  | 
 | 2376 | extern int		netdev_max_backlog; | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 2377 | extern int		netdev_tstamp_prequeue; | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2378 | extern int		weight_p; | 
 | 2379 | extern int		netdev_set_master(struct net_device *dev, struct net_device *master); | 
 | 2380 | extern int skb_checksum_help(struct sk_buff *skb); | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2381 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2382 | #ifdef CONFIG_BUG | 
 | 2383 | extern void netdev_rx_csum_fault(struct net_device *dev); | 
 | 2384 | #else | 
 | 2385 | static inline void netdev_rx_csum_fault(struct net_device *dev) | 
 | 2386 | { | 
 | 2387 | } | 
 | 2388 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 | /* rx skb timestamps */ | 
 | 2390 | extern void		net_enable_timestamp(void); | 
 | 2391 | extern void		net_disable_timestamp(void); | 
 | 2392 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 2393 | #ifdef CONFIG_PROC_FS | 
 | 2394 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); | 
 | 2395 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); | 
 | 2396 | extern void dev_seq_stop(struct seq_file *seq, void *v); | 
 | 2397 | #endif | 
 | 2398 |  | 
| Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 2399 | extern int netdev_class_create_file(struct class_attribute *class_attr); | 
 | 2400 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | 
 | 2401 |  | 
| Johannes Berg | 0460079 | 2010-08-05 17:45:15 +0200 | [diff] [blame] | 2402 | extern struct kobj_ns_type_operations net_ns_type_operations; | 
 | 2403 |  | 
| Stephen Hemminger | cf04a4c7 | 2008-09-30 02:22:14 -0700 | [diff] [blame] | 2404 | extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); | 
| Arjan van de Ven | 6579e57 | 2008-07-21 13:31:48 -0700 | [diff] [blame] | 2405 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 2406 | extern void linkwatch_run_queue(void); | 
 | 2407 |  | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2408 | u32 netdev_increment_features(u32 all, u32 one, u32 mask); | 
| Michał Mirosław | acd1130 | 2011-01-24 15:45:15 -0800 | [diff] [blame] | 2409 | u32 netdev_fix_features(struct net_device *dev, u32 features); | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 2410 |  | 
| Patrick Mullaney | fc4a748 | 2009-12-03 15:59:22 -0800 | [diff] [blame] | 2411 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, | 
 | 2412 | 					struct net_device *dev); | 
 | 2413 |  | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2414 | u32 netif_skb_features(struct sk_buff *skb); | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2415 |  | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2416 | static inline int net_gso_ok(u32 features, int gso_type) | 
| Herbert Xu | bcd7611 | 2006-06-30 13:36:35 -0700 | [diff] [blame] | 2417 | { | 
 | 2418 | 	int feature = gso_type << NETIF_F_GSO_SHIFT; | 
 | 2419 | 	return (features & feature) == feature; | 
 | 2420 | } | 
 | 2421 |  | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2422 | static inline int skb_gso_ok(struct sk_buff *skb, u32 features) | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2423 | { | 
| Herbert Xu | 278b251 | 2009-06-03 21:20:51 -0700 | [diff] [blame] | 2424 | 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) && | 
| David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2425 | 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2426 | } | 
 | 2427 |  | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2428 | static inline int netif_needs_gso(struct sk_buff *skb, int features) | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 2429 | { | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2430 | 	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || | 
 | 2431 | 		unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 2432 | } | 
 | 2433 |  | 
| Peter P Waskiewicz Jr | 82cc1a7 | 2008-03-21 03:43:19 -0700 | [diff] [blame] | 2434 | static inline void netif_set_gso_max_size(struct net_device *dev, | 
 | 2435 | 					  unsigned int size) | 
 | 2436 | { | 
 | 2437 | 	dev->gso_max_size = size; | 
 | 2438 | } | 
 | 2439 |  | 
| Eric Dumazet | acbbc07 | 2010-04-11 06:56:11 +0000 | [diff] [blame] | 2440 | extern int __skb_bond_should_drop(struct sk_buff *skb, | 
 | 2441 | 				  struct net_device *master); | 
| Jiri Pirko | 5d4e039 | 2009-05-28 01:05:00 +0000 | [diff] [blame] | 2442 |  | 
| Eric Dumazet | 0641e4f | 2010-03-18 21:16:45 -0700 | [diff] [blame] | 2443 | static inline int skb_bond_should_drop(struct sk_buff *skb, | 
 | 2444 | 				       struct net_device *master) | 
| David S. Miller | 7ea49ed | 2006-08-14 17:08:36 -0700 | [diff] [blame] | 2445 | { | 
| Eric Dumazet | acbbc07 | 2010-04-11 06:56:11 +0000 | [diff] [blame] | 2446 | 	if (master) | 
 | 2447 | 		return __skb_bond_should_drop(skb, master); | 
| David S. Miller | 7ea49ed | 2006-08-14 17:08:36 -0700 | [diff] [blame] | 2448 | 	return 0; | 
 | 2449 | } | 
 | 2450 |  | 
| Eric W. Biederman | 505d4f7 | 2008-11-07 22:54:20 -0800 | [diff] [blame] | 2451 | extern struct pernet_operations __net_initdata loopback_net_ops; | 
| Patrick McHardy | b1b67dd | 2009-04-20 04:49:28 +0000 | [diff] [blame] | 2452 |  | 
 | 2453 | static inline int dev_ethtool_get_settings(struct net_device *dev, | 
 | 2454 | 					   struct ethtool_cmd *cmd) | 
 | 2455 | { | 
 | 2456 | 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings) | 
 | 2457 | 		return -EOPNOTSUPP; | 
 | 2458 | 	return dev->ethtool_ops->get_settings(dev, cmd); | 
 | 2459 | } | 
 | 2460 |  | 
 | 2461 | static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) | 
 | 2462 | { | 
 | 2463 | 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) | 
 | 2464 | 		return 0; | 
 | 2465 | 	return dev->ethtool_ops->get_rx_csum(dev); | 
 | 2466 | } | 
 | 2467 |  | 
 | 2468 | static inline u32 dev_ethtool_get_flags(struct net_device *dev) | 
 | 2469 | { | 
 | 2470 | 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) | 
 | 2471 | 		return 0; | 
 | 2472 | 	return dev->ethtool_ops->get_flags(dev); | 
 | 2473 | } | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 2474 |  | 
 | 2475 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ | 
 | 2476 |  | 
 | 2477 | /* netdev_printk helpers, similar to dev_printk */ | 
 | 2478 |  | 
 | 2479 | static inline const char *netdev_name(const struct net_device *dev) | 
 | 2480 | { | 
 | 2481 | 	if (dev->reg_state != NETREG_REGISTERED) | 
 | 2482 | 		return "(unregistered net_device)"; | 
 | 2483 | 	return dev->name; | 
 | 2484 | } | 
 | 2485 |  | 
| Joe Perches | 256df2f | 2010-06-27 01:02:35 +0000 | [diff] [blame] | 2486 | extern int netdev_printk(const char *level, const struct net_device *dev, | 
 | 2487 | 			 const char *format, ...) | 
 | 2488 | 	__attribute__ ((format (printf, 3, 4))); | 
 | 2489 | extern int netdev_emerg(const struct net_device *dev, const char *format, ...) | 
 | 2490 | 	__attribute__ ((format (printf, 2, 3))); | 
 | 2491 | extern int netdev_alert(const struct net_device *dev, const char *format, ...) | 
 | 2492 | 	__attribute__ ((format (printf, 2, 3))); | 
 | 2493 | extern int netdev_crit(const struct net_device *dev, const char *format, ...) | 
 | 2494 | 	__attribute__ ((format (printf, 2, 3))); | 
 | 2495 | extern int netdev_err(const struct net_device *dev, const char *format, ...) | 
 | 2496 | 	__attribute__ ((format (printf, 2, 3))); | 
 | 2497 | extern int netdev_warn(const struct net_device *dev, const char *format, ...) | 
 | 2498 | 	__attribute__ ((format (printf, 2, 3))); | 
 | 2499 | extern int netdev_notice(const struct net_device *dev, const char *format, ...) | 
 | 2500 | 	__attribute__ ((format (printf, 2, 3))); | 
 | 2501 | extern int netdev_info(const struct net_device *dev, const char *format, ...) | 
 | 2502 | 	__attribute__ ((format (printf, 2, 3))); | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 2503 |  | 
 | 2504 | #if defined(DEBUG) | 
 | 2505 | #define netdev_dbg(__dev, format, args...)			\ | 
 | 2506 | 	netdev_printk(KERN_DEBUG, __dev, format, ##args) | 
 | 2507 | #elif defined(CONFIG_DYNAMIC_DEBUG) | 
 | 2508 | #define netdev_dbg(__dev, format, args...)			\ | 
 | 2509 | do {								\ | 
 | 2510 | 	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\ | 
 | 2511 | 			netdev_name(__dev), ##args);		\ | 
 | 2512 | } while (0) | 
 | 2513 | #else | 
 | 2514 | #define netdev_dbg(__dev, format, args...)			\ | 
 | 2515 | ({								\ | 
 | 2516 | 	if (0)							\ | 
 | 2517 | 		netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | 
 | 2518 | 	0;							\ | 
 | 2519 | }) | 
 | 2520 | #endif | 
 | 2521 |  | 
 | 2522 | #if defined(VERBOSE_DEBUG) | 
 | 2523 | #define netdev_vdbg	netdev_dbg | 
 | 2524 | #else | 
 | 2525 |  | 
 | 2526 | #define netdev_vdbg(dev, format, args...)			\ | 
 | 2527 | ({								\ | 
 | 2528 | 	if (0)							\ | 
 | 2529 | 		netdev_printk(KERN_DEBUG, dev, format, ##args);	\ | 
 | 2530 | 	0;							\ | 
 | 2531 | }) | 
 | 2532 | #endif | 
 | 2533 |  | 
 | 2534 | /* | 
 | 2535 |  * netdev_WARN() acts like dev_printk(), but with the key difference | 
 | 2536 |  * of using a WARN/WARN_ON to get the message out, including the | 
 | 2537 |  * file/line information and a backtrace. | 
 | 2538 |  */ | 
 | 2539 | #define netdev_WARN(dev, format, args...)			\ | 
 | 2540 | 	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); | 
 | 2541 |  | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2542 | /* netif printk helpers, similar to netdev_printk */ | 
 | 2543 |  | 
 | 2544 | #define netif_printk(priv, type, level, dev, fmt, args...)	\ | 
 | 2545 | do {					  			\ | 
 | 2546 | 	if (netif_msg_##type(priv))				\ | 
 | 2547 | 		netdev_printk(level, (dev), fmt, ##args);	\ | 
 | 2548 | } while (0) | 
 | 2549 |  | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2550 | #define netif_level(level, priv, type, dev, fmt, args...)	\ | 
 | 2551 | do {								\ | 
 | 2552 | 	if (netif_msg_##type(priv))				\ | 
 | 2553 | 		netdev_##level(dev, fmt, ##args);		\ | 
 | 2554 | } while (0) | 
 | 2555 |  | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2556 | #define netif_emerg(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2557 | 	netif_level(emerg, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2558 | #define netif_alert(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2559 | 	netif_level(alert, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2560 | #define netif_crit(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2561 | 	netif_level(crit, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2562 | #define netif_err(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2563 | 	netif_level(err, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2564 | #define netif_warn(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2565 | 	netif_level(warn, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2566 | #define netif_notice(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2567 | 	netif_level(notice, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2568 | #define netif_info(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 2569 | 	netif_level(info, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2570 |  | 
 | 2571 | #if defined(DEBUG) | 
 | 2572 | #define netif_dbg(priv, type, dev, format, args...)		\ | 
 | 2573 | 	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | 
 | 2574 | #elif defined(CONFIG_DYNAMIC_DEBUG) | 
 | 2575 | #define netif_dbg(priv, type, netdev, format, args...)		\ | 
 | 2576 | do {								\ | 
 | 2577 | 	if (netif_msg_##type(priv))				\ | 
 | 2578 | 		dynamic_dev_dbg((netdev)->dev.parent,		\ | 
 | 2579 | 				"%s: " format,			\ | 
 | 2580 | 				netdev_name(netdev), ##args);	\ | 
 | 2581 | } while (0) | 
 | 2582 | #else | 
 | 2583 | #define netif_dbg(priv, type, dev, format, args...)			\ | 
 | 2584 | ({									\ | 
 | 2585 | 	if (0)								\ | 
 | 2586 | 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | 
 | 2587 | 	0;								\ | 
 | 2588 | }) | 
 | 2589 | #endif | 
 | 2590 |  | 
 | 2591 | #if defined(VERBOSE_DEBUG) | 
| Ben Hutchings | bcfcc45 | 2010-07-02 07:08:44 +0000 | [diff] [blame] | 2592 | #define netif_vdbg	netif_dbg | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2593 | #else | 
 | 2594 | #define netif_vdbg(priv, type, dev, format, args...)		\ | 
 | 2595 | ({								\ | 
 | 2596 | 	if (0)							\ | 
| Ben Hutchings | a4ed89c | 2010-05-18 06:56:32 +0000 | [diff] [blame] | 2597 | 		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 2598 | 	0;							\ | 
 | 2599 | }) | 
 | 2600 | #endif | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 2601 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2602 | #endif /* __KERNEL__ */ | 
 | 2603 |  | 
| Jiri Pirko | 385a154 | 2009-05-27 15:48:07 -0700 | [diff] [blame] | 2604 | #endif	/* _LINUX_NETDEVICE_H */ |