/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

extern void netdev_set_default_ethtool_ops(struct net_device *dev,
					    const struct ethtool_ops *ops);

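/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * wiring up its ethtool_ops at probe time.  "foo_ethtool_ops" and
 * "foo_probe" are made-up names used only for illustration.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link	= ethtool_op_get_link,
 *	};
 *
 *	static int foo_probe(struct net_device *netdev)
 *	{
 *		SET_ETHTOOL_OPS(netdev, &foo_ethtool_ops);
 *		return register_netdev(netdev);
 *	}
 */
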
/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
#define NET_ADDR_SET		3	/* address is set using
					 * dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

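/*
 * Illustrative sketch (not part of this header): the typical shape of a
 * driver ->ndo_start_xmit() using these codes.  "foo_priv" and
 * "foo_tx_ring_full" are made-up names; a real driver also maps buffers
 * and notifies the hardware.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;	(stack keeps ownership of skb)
 *		}
 *		... post skb to hardware ...
 *		return NETDEV_TX_OK;		(driver now owns the skb)
 *	}
 */
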
/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

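/*
 * Illustrative sketch (a simplified view, not part of this header): how the
 * qdisc transmit path interprets a driver's return value, roughly as done by
 * sch_direct_xmit() in net/sched/sch_generic.c.
 *
 *	ret = dev_hard_start_xmit(skb, dev, txq);
 *	if (dev_xmit_complete(ret)) {
 *		... the skb was consumed by the driver, nothing to requeue ...
 *	} else {
 *		... NETDEV_TX_BUSY or NETDEV_TX_LOCKED: the skb is still owned
 *		    by the stack and must be requeued ...
 *	}
 */
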
/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 *	Old network device statistics. Fields are native words
 *	(unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

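/*
 * Illustrative sketch (not part of this header): many drivers simply update
 * the embedded dev->stats counters from their datapath and let the core
 * report them.  "foo_rx_one" is a made-up name.
 *
 *	static void foo_rx_one(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		dev->stats.rx_packets++;
 *		dev->stats.rx_bytes += skb->len;
 *		netif_rx(skb);
 *	}
 */
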

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

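/*
 * Illustrative sketch (not part of this header): walking the device
 * multicast list from a driver's ->ndo_set_rx_mode().  "foo_set_rx_mode"
 * and "foo_hw_add_mc_filter" are made-up names.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		if (netdev_mc_empty(dev))
 *			return;
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);
 *	}
 */
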
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

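/*
 * Illustrative sketch (not part of this header): reserving link-layer
 * headroom when building a packet for a given device, the usual pattern
 * in protocol code.  "payload_len" is a made-up variable.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	... fill in the payload, then let dev_hard_header() prepend the
 *	    link-layer header into the reserved headroom ...
 */
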
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

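/*
 * Illustrative sketch (not part of this header): a minimal rx_handler that
 * lets every frame pass through, registered under RTNL.  "foo_handle_frame"
 * is a made-up name.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		... inspect or steal skb here; update *pskb if it changes ...
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, NULL);
 */
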
extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

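/*
 * Illustrative sketch (not part of this header): the common interrupt
 * handler pattern that defers RX work to NAPI.  "foo_priv" and
 * "foo_disable_irqs" are made-up names.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_irqs(priv);	(mask device interrupts)
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */
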
/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
extern struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
extern void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
extern void napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

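/*
 * Illustrative sketch (not part of this header): napi_enable()/napi_disable()
 * are typically paired in a driver's ->ndo_open()/->ndo_stop(), with the
 * napi_struct having been set up earlier via netif_napi_add().
 * "foo_priv" is a made-up name.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		napi_enable(&priv->napi);
 *		netif_start_queue(dev);
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		netif_stop_queue(dev);
 *		napi_disable(&priv->napi);
 *		return 0;
 *	}
 */
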
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
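
/*
 * Illustrative sketch (not part of this header): because rps_map ends in a
 * flexible array, it is sized with RPS_MAP_SIZE() when allocated, roughly as
 * the sysfs rps_cpus store handler in net/core/net-sysfs.c does.
 *
 *	map = kzalloc(max_t(unsigned int,
 *			    RPS_MAP_SIZE(cpumask_weight(mask)),
 *			    L1_CACHE_BYTES), GFP_KERNEL);
 *	... fill map->cpus[] from the mask and set map->len accordingly ...
 *	rcu_assign_pointer(queue->rps_map, map);
 */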

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_PORT_ID_LEN 32

/* This structure holds a unique identifier to identify the
 * physical port used by a netdevice.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};

| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 741 | /* | 
|  | 742 | * This structure defines the management hooks for network devices. | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 743 | * The following hooks can be defined; unless noted otherwise, they are | 
|  | 744 | * optional and can be filled with a null pointer. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 745 | * | 
|  | 746 | * int (*ndo_init)(struct net_device *dev); | 
|  | 747 | *     This function is called once when network device is registered. | 
|  | 748 | *     The network device can use this to any late stage initializaton | 
|  | 749 | *     or semantic validattion. It can fail with an error code which will | 
|  | 750 | *     be propogated back to register_netdev | 
|  | 751 | * | 
|  | 752 | * void (*ndo_uninit)(struct net_device *dev); | 
|  | 753 | *     This function is called when device is unregistered or when registration | 
|  | 754 | *     fails. It is not called if init fails. | 
|  | 755 | * | 
|  | 756 | * int (*ndo_open)(struct net_device *dev); | 
|  | 757 | *     This function is called when network device transistions to the up | 
|  | 758 | *     state. | 
|  | 759 | * | 
|  | 760 | * int (*ndo_stop)(struct net_device *dev); | 
|  | 761 | *     This function is called when network device transistions to the down | 
|  | 762 | *     state. | 
|  | 763 | * | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 764 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, | 
|  | 765 | *                               struct net_device *dev); | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 766 | *	Called when a packet needs to be transmitted. | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 767 | *	Must return NETDEV_TX_OK , NETDEV_TX_BUSY. | 
|  | 768 | *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 769 | *	Required can not be NULL. | 
|  | 770 | * | 
|  | 771 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); | 
|  | 772 | *	Called to decide which queue to when device supports multiple | 
|  | 773 | *	transmit queues. | 
|  | 774 | * | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 775 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); | 
|  | 776 | *	This function is called to allow device receiver to make | 
|  | 777 | *	changes to configuration when multicast or promiscious is enabled. | 
|  | 778 | * | 
|  | 779 | * void (*ndo_set_rx_mode)(struct net_device *dev); | 
|  | 780 | *	This function is called device changes address list filtering. | 
| Jiri Pirko | 0178934 | 2011-08-16 06:29:00 +0000 | [diff] [blame] | 781 | *	If driver handles unicast address filtering, it should set | 
|  | 782 | *	IFF_UNICAST_FLT to its priv_flags. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 783 | * | 
|  | 784 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | 
|  | 785 | *	This function  is called when the Media Access Control address | 
| Mike Rapoport | 37b607c | 2009-04-27 05:45:54 -0700 | [diff] [blame] | 786 | *	needs to be changed. If this interface is not defined, the | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 787 | *	mac address can not be changed. | 
|  | 788 | * | 
|  | 789 | * int (*ndo_validate_addr)(struct net_device *dev); | 
|  | 790 | *	Test if Media Access Control address is valid for the device. | 
|  | 791 | * | 
|  | 792 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | 
|  | 793 | *	Called when a user request an ioctl which can't be handled by | 
|  | 794 | *	the generic interface code. If not defined ioctl's return | 
|  | 795 | *	not supported error code. | 
|  | 796 | * | 
|  | 797 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | 
|  | 798 | *	Used to set network devices bus interface parameters. This interface | 
|  | 799 | *	is retained for legacy reason, new devices should use the bus | 
|  | 800 | *	interface (PCI) for low level management. | 
|  | 801 | * | 
|  | 802 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | 
|  | 803 | *	Called when a user wants to change the Maximum Transfer Unit | 
|  | 804 | *	of a device. If not defined, any request to change MTU will | 
|  | 805 | *	will return an error. | 
|  | 806 | * | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 807 | * void (*ndo_tx_timeout)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 808 | *	Callback uses when the transmitter has not made any progress | 
|  | 809 | *	for dev->watchdog ticks. | 
|  | 810 | * | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 811 | * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 812 | *                      struct rtnl_link_stats64 *storage); | 
| Wolfram Sang | d308e38 | 2009-10-07 13:53:11 -0700 | [diff] [blame] | 813 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 814 | *	Called when a user wants to get the network device usage | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 815 | *	statistics. Drivers must do one of the following: | 
| Ben Hutchings | 3cfde79 | 2010-07-09 09:11:52 +0000 | [diff] [blame] | 816 | *	1. Define @ndo_get_stats64 to fill in a zero-initialised | 
|  | 817 | *	   rtnl_link_stats64 structure passed by the caller. | 
| Ben Hutchings | 82695d9 | 2010-06-15 15:08:48 -0700 | [diff] [blame] | 818 | *	2. Define @ndo_get_stats to update a net_device_stats structure | 
| Ben Hutchings | be1f3c2 | 2010-06-08 07:19:54 +0000 | [diff] [blame] | 819 | *	   (which should normally be dev->stats) and return a pointer to | 
|  | 820 | *	   it. The structure may be changed asynchronously only if each | 
|  | 821 | *	   field is written atomically. | 
|  | 822 | *	3. Update dev->stats asynchronously and atomically, and define | 
|  | 823 | *	   neither operation. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 824 | * | 
| Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 825 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); | 
|  | 826 | *	If device support VLAN filtering this function is called when a | 
|  | 827 | *	VLAN id is registered. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 828 | * | 
| Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 829 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); | 
| Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 830 | *	If device support VLAN filtering this function is called when a | 
|  | 831 | *	VLAN id is unregistered. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 832 | * | 
|  | 833 | * void (*ndo_poll_controller)(struct net_device *dev); | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 834 | * | 
|  | 835 | *	SR-IOV management functions. | 
|  | 836 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | 
|  | 837 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); | 
|  | 838 | * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); | 
| Greg Rose | 5f8444a | 2011-10-08 03:05:24 +0000 | [diff] [blame] | 839 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 840 | * int (*ndo_get_vf_config)(struct net_device *dev, | 
|  | 841 | *			    int vf, struct ifla_vf_info *ivf); | 
| Rony Efraim | 1d8faf4 | 2013-06-13 13:19:10 +0300 | [diff] [blame] | 842 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); | 
| Scott Feldman | 57b6108 | 2010-05-17 22:49:55 -0700 | [diff] [blame] | 843 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, | 
|  | 844 | *			  struct nlattr *port[]); | 
|  | 845 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 846 | * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) | 
|  | 847 | * 	Called to setup 'tc' number of traffic classes in the net device. This | 
|  | 848 | * 	is always called from the stack with the rtnl lock held and netif tx | 
|  | 849 | * 	queues stopped. This allows the netdevice to perform queue management | 
|  | 850 | * 	safely. | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 851 | * | 
| Yi Zou | e9bce84 | 2011-03-09 08:48:03 +0000 | [diff] [blame] | 852 | *	Fiber Channel over Ethernet (FCoE) offload functions. | 
|  | 853 | * int (*ndo_fcoe_enable)(struct net_device *dev); | 
|  | 854 | *	Called when the FCoE protocol stack wants to start using LLD for FCoE | 
|  | 855 | *	so the underlying device can perform whatever needed configuration or | 
|  | 856 | *	initialization to support acceleration of FCoE traffic. | 
|  | 857 | * | 
|  | 858 | * int (*ndo_fcoe_disable)(struct net_device *dev); | 
|  | 859 | *	Called when the FCoE protocol stack wants to stop using LLD for FCoE | 
|  | 860 | *	so the underlying device can perform whatever needed clean-ups to | 
|  | 861 | *	stop supporting acceleration of FCoE traffic. | 
|  | 862 | * | 
|  | 863 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, | 
|  | 864 | *			     struct scatterlist *sgl, unsigned int sgc); | 
|  | 865 | *	Called when the FCoE Initiator wants to initialize an I/O that | 
|  | 866 | *	is a possible candidate for Direct Data Placement (DDP). The LLD can | 
|  | 867 | *	perform necessary setup and returns 1 to indicate the device is set up | 
|  | 868 | *	successfully to perform DDP on this I/O, otherwise this returns 0. | 
|  | 869 | * | 
|  | 870 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid); | 
|  | 871 | *	Called when the FCoE Initiator/Target is done with the DDPed I/O as | 
|  | 872 | *	indicated by the FC exchange id 'xid', so the underlying device can | 
|  | 873 | *	clean up and reuse resources for later DDP requests. | 
|  | 874 | * | 
|  | 875 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, | 
|  | 876 | *			      struct scatterlist *sgl, unsigned int sgc); | 
|  | 877 | *	Called when the FCoE Target wants to initialize an I/O that | 
|  | 878 | *	is a possible candidate for Direct Data Placement (DDP). The LLD can | 
|  | 879 | *	perform necessary setup and returns 1 to indicate the device is set up | 
|  | 880 | *	successfully to perform DDP on this I/O, otherwise this returns 0. | 
|  | 881 | * | 
| Neerav Parikh | 68bad94 | 2012-01-04 20:23:39 +0000 | [diff] [blame] | 882 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, | 
|  | 883 | *			       struct netdev_fcoe_hbainfo *hbainfo); | 
|  | 884 | *	Called when the FCoE Protocol stack wants information on the underlying | 
|  | 885 | *	device. This information is utilized by the FCoE protocol stack to | 
|  | 886 | *	register attributes with Fiber Channel management service as per the | 
|  | 887 | *	FC-GS Fabric Device Management Information (FDMI) specification. | 
|  | 888 | * | 
| Yi Zou | e9bce84 | 2011-03-09 08:48:03 +0000 | [diff] [blame] | 889 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); | 
|  | 890 | *	Called when the underlying device wants to override default World Wide | 
|  | 891 | *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own | 
|  | 892 | *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE | 
|  | 893 | *	protocol stack to use. | 
|  | 894 | * | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 895 | *	RFS acceleration. | 
|  | 896 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | 
|  | 897 | *			    u16 rxq_index, u32 flow_id); | 
|  | 898 | *	Set hardware filter for RFS.  rxq_index is the target queue index; | 
|  | 899 | *	flow_id is a flow ID to be passed to rps_may_expire_flow() later. | 
|  | 900 | *	Return the filter ID on success, or a negative error code. | 
| Jiri Pirko | fbaec0e | 2011-02-13 10:15:37 +0000 | [diff] [blame] | 901 | * | 
| Jiri Pirko | 8b98a70 | 2013-01-03 22:49:02 +0000 | [diff] [blame] | 902 | *	Slave management functions (for bridge, bonding, etc). | 
| Jiri Pirko | fbaec0e | 2011-02-13 10:15:37 +0000 | [diff] [blame] | 903 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); | 
|  | 904 | *	Called to make another netdev an underling. | 
|  | 905 | * | 
|  | 906 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); | 
|  | 907 | *	Called to release previously enslaved netdev. | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 908 | * | 
|  | 909 | *      Feature/offload setting functions. | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 910 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, | 
|  | 911 | *		netdev_features_t features); | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 912 | *	Adjusts the requested feature flags according to device-specific | 
|  | 913 | *	constraints, and returns the resulting flags. Must not modify | 
|  | 914 | *	the device state. | 
|  | 915 | * | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 916 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 917 | *	Called to update device configuration to new features. Passed | 
|  | 918 | *	feature set might be less than what was returned by ndo_fix_features(). | 
|  | 919 | *	Must return >0 or -errno if it changed dev->features itself. | 
|  | 920 | * | 
| stephen hemminger | edc7d57 | 2012-10-01 12:32:33 +0000 | [diff] [blame] | 921 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], | 
|  | 922 | *		      struct net_device *dev, | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 923 | *		      const unsigned char *addr, u16 flags) | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 924 | *	Adds an FDB entry to dev for addr. | 
| Vlad Yasevich | 1690be6 | 2013-02-13 12:00:18 +0000 | [diff] [blame] | 925 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], | 
|  | 926 | *		      struct net_device *dev, | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 927 | *		      const unsigned char *addr) | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 928 | *	Deletes the FDB entry from dev corresponding to addr. | 
|  | 929 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | 
|  | 930 | *		       struct net_device *dev, int idx) | 
|  | 931 | *	Used to add FDB entries to dump requests. Implementers should add | 
|  | 932 | *	entries to skb and update idx with the number of entries. | 
| John Fastabend | e5a55a8 | 2012-10-24 08:12:57 +0000 | [diff] [blame] | 933 | * | 
|  | 934 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) | 
|  | 935 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | 
| Dmitry Kravkov | 24f11a5 | 2013-03-27 06:54:00 +0000 | [diff] [blame] | 936 | *			     struct net_device *dev, u32 filter_mask) | 
| Jiri Pirko | 4bf84c3 | 2012-12-27 23:49:37 +0000 | [diff] [blame] | 937 | * | 
|  | 938 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | 
|  | 939 | *	Called to change device carrier. Soft-devices (like dummy, team, etc) | 
|  | 940 | *	which do not represent real hardware may define this to allow their | 
|  | 941 | *	userspace components to manage their virtual carrier state. Devices | 
|  | 942 | *	that determine carrier state from physical hardware properties (eg | 
|  | 943 | *	network cables) or protocol-dependent mechanisms (eg | 
|  | 944 | *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | 
| Jiri Pirko | 66b52b0 | 2013-07-29 18:16:49 +0200 | [diff] [blame] | 945 | * | 
|  | 946 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | 
|  | 947 | *			       struct netdev_phys_port_id *ppid); | 
|  | 948 | *	Called to get the ID of the physical port of this device. If the driver | 
|  | 949 | *	does not implement this, it is assumed that the hw is not able to have | 
|  | 950 | *	multiple net devices on a single physical port. | 
| Joseph Gasparakis | 53cf5275 | 2013-09-04 02:13:38 -0700 | [diff] [blame] | 951 | * | 
|  | 952 | * void (*ndo_add_vxlan_port)(struct  net_device *dev, | 
| Joseph Gasparakis | 35e4237 | 2013-09-13 07:34:13 -0700 | [diff] [blame] | 953 | *			      sa_family_t sa_family, __be16 port); | 
| Joseph Gasparakis | 53cf5275 | 2013-09-04 02:13:38 -0700 | [diff] [blame] | 954 | *	Called by vxlan to notify a driver about the UDP port and socket | 
|  | 955 | *	address family that vxlan is listening to. It is called only when | 
|  | 956 | *	a new port starts listening. The operation is protected by the | 
|  | 957 | *	vxlan_net->sock_lock. | 
|  | 958 | * | 
|  | 959 | * void (*ndo_del_vxlan_port)(struct  net_device *dev, | 
| Joseph Gasparakis | 35e4237 | 2013-09-13 07:34:13 -0700 | [diff] [blame] | 960 | *			      sa_family_t sa_family, __be16 port); | 
| Joseph Gasparakis | 53cf5275 | 2013-09-04 02:13:38 -0700 | [diff] [blame] | 961 | *	Called by vxlan to notify the driver about a UDP port and socket | 
|  | 962 | *	address family that vxlan is not listening to anymore. The operation | 
|  | 963 | *	is protected by the vxlan_net->sock_lock. | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 964 | */ | 
|  | 965 | struct net_device_ops { | 
|  | 966 | int			(*ndo_init)(struct net_device *dev); | 
|  | 967 | void			(*ndo_uninit)(struct net_device *dev); | 
|  | 968 | int			(*ndo_open)(struct net_device *dev); | 
|  | 969 | int			(*ndo_stop)(struct net_device *dev); | 
| Stephen Hemminger | dc1f8bf | 2009-08-31 19:50:40 +0000 | [diff] [blame] | 970 | netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb, | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 971 | struct net_device *dev); | 
|  | 972 | u16			(*ndo_select_queue)(struct net_device *dev, | 
|  | 973 | struct sk_buff *skb); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 974 | void			(*ndo_change_rx_flags)(struct net_device *dev, | 
|  | 975 | int flags); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 976 | void			(*ndo_set_rx_mode)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 977 | int			(*ndo_set_mac_address)(struct net_device *dev, | 
|  | 978 | void *addr); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 979 | int			(*ndo_validate_addr)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 980 | int			(*ndo_do_ioctl)(struct net_device *dev, | 
|  | 981 | struct ifreq *ifr, int cmd); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 982 | int			(*ndo_set_config)(struct net_device *dev, | 
|  | 983 | struct ifmap *map); | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 984 | int			(*ndo_change_mtu)(struct net_device *dev, | 
|  | 985 | int new_mtu); | 
|  | 986 | int			(*ndo_neigh_setup)(struct net_device *dev, | 
|  | 987 | struct neigh_parms *); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 988 | void			(*ndo_tx_timeout) (struct net_device *dev); | 
|  | 989 |  | 
| Eric Dumazet | 2817273 | 2010-07-07 14:58:56 -0700 | [diff] [blame] | 990 | struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, | 
|  | 991 | struct rtnl_link_stats64 *storage); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 992 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); | 
|  | 993 |  | 
| Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 994 | int			(*ndo_vlan_rx_add_vid)(struct net_device *dev, | 
| Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 995 | __be16 proto, u16 vid); | 
| Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 996 | int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev, | 
| Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 997 | __be16 proto, u16 vid); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 998 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 999 | void                    (*ndo_poll_controller)(struct net_device *dev); | 
| Herbert Xu | 4247e16 | 2010-06-10 16:12:47 +0000 | [diff] [blame] | 1000 | int			(*ndo_netpoll_setup)(struct net_device *dev, | 
| Amerigo Wang | 47be03a2 | 2012-08-10 01:24:37 +0000 | [diff] [blame] | 1001 | struct netpoll_info *info, | 
|  | 1002 | gfp_t gfp); | 
| WANG Cong | 0e34e93 | 2010-05-06 00:47:21 -0700 | [diff] [blame] | 1003 | void			(*ndo_netpoll_cleanup)(struct net_device *dev); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1004 | #endif | 
| Cong Wang | e0d1095 | 2013-08-01 11:10:25 +0800 | [diff] [blame] | 1005 | #ifdef CONFIG_NET_RX_BUSY_POLL | 
| Eliezer Tamir | 8b80cda | 2013-07-10 17:13:26 +0300 | [diff] [blame] | 1006 | int			(*ndo_busy_poll)(struct napi_struct *dev); | 
| Eliezer Tamir | 0602129 | 2013-06-10 11:39:50 +0300 | [diff] [blame] | 1007 | #endif | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 1008 | int			(*ndo_set_vf_mac)(struct net_device *dev, | 
|  | 1009 | int queue, u8 *mac); | 
|  | 1010 | int			(*ndo_set_vf_vlan)(struct net_device *dev, | 
|  | 1011 | int queue, u16 vlan, u8 qos); | 
|  | 1012 | int			(*ndo_set_vf_tx_rate)(struct net_device *dev, | 
|  | 1013 | int vf, int rate); | 
| Greg Rose | 5f8444a | 2011-10-08 03:05:24 +0000 | [diff] [blame] | 1014 | int			(*ndo_set_vf_spoofchk)(struct net_device *dev, | 
|  | 1015 | int vf, bool setting); | 
| Williams, Mitch A | 95c26df | 2010-02-10 01:43:46 +0000 | [diff] [blame] | 1016 | int			(*ndo_get_vf_config)(struct net_device *dev, | 
|  | 1017 | int vf, | 
|  | 1018 | struct ifla_vf_info *ivf); | 
| Rony Efraim | 1d8faf4 | 2013-06-13 13:19:10 +0300 | [diff] [blame] | 1019 | int			(*ndo_set_vf_link_state)(struct net_device *dev, | 
|  | 1020 | int vf, int link_state); | 
| Scott Feldman | 57b6108 | 2010-05-17 22:49:55 -0700 | [diff] [blame] | 1021 | int			(*ndo_set_vf_port)(struct net_device *dev, | 
|  | 1022 | int vf, | 
|  | 1023 | struct nlattr *port[]); | 
|  | 1024 | int			(*ndo_get_vf_port)(struct net_device *dev, | 
|  | 1025 | int vf, struct sk_buff *skb); | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1026 | int			(*ndo_setup_tc)(struct net_device *dev, u8 tc); | 
| Ben Hutchings | d11ead7 | 2011-11-25 14:40:26 +0000 | [diff] [blame] | 1027 | #if IS_ENABLED(CONFIG_FCOE) | 
| Yi Zou | cb45439 | 2009-08-31 12:31:36 +0000 | [diff] [blame] | 1028 | int			(*ndo_fcoe_enable)(struct net_device *dev); | 
|  | 1029 | int			(*ndo_fcoe_disable)(struct net_device *dev); | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 1030 | int			(*ndo_fcoe_ddp_setup)(struct net_device *dev, | 
|  | 1031 | u16 xid, | 
|  | 1032 | struct scatterlist *sgl, | 
|  | 1033 | unsigned int sgc); | 
|  | 1034 | int			(*ndo_fcoe_ddp_done)(struct net_device *dev, | 
|  | 1035 | u16 xid); | 
| Yi Zou | 6247e08 | 2011-02-01 07:22:06 +0000 | [diff] [blame] | 1036 | int			(*ndo_fcoe_ddp_target)(struct net_device *dev, | 
|  | 1037 | u16 xid, | 
|  | 1038 | struct scatterlist *sgl, | 
|  | 1039 | unsigned int sgc); | 
| Neerav Parikh | 68bad94 | 2012-01-04 20:23:39 +0000 | [diff] [blame] | 1040 | int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev, | 
|  | 1041 | struct netdev_fcoe_hbainfo *hbainfo); | 
| Bhanu Prakash Gollapudi | 3c9c36bc | 2011-08-26 09:45:41 +0000 | [diff] [blame] | 1042 | #endif | 
|  | 1043 |  | 
| Ben Hutchings | d11ead7 | 2011-11-25 14:40:26 +0000 | [diff] [blame] | 1044 | #if IS_ENABLED(CONFIG_LIBFCOE) | 
| Yi Zou | df5c794 | 2009-10-28 18:24:35 +0000 | [diff] [blame] | 1045 | #define NETDEV_FCOE_WWNN 0 | 
|  | 1046 | #define NETDEV_FCOE_WWPN 1 | 
|  | 1047 | int			(*ndo_fcoe_get_wwn)(struct net_device *dev, | 
|  | 1048 | u64 *wwn, int type); | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 1049 | #endif | 
| Bhanu Prakash Gollapudi | 3c9c36bc | 2011-08-26 09:45:41 +0000 | [diff] [blame] | 1050 |  | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 1051 | #ifdef CONFIG_RFS_ACCEL | 
|  | 1052 | int			(*ndo_rx_flow_steer)(struct net_device *dev, | 
|  | 1053 | const struct sk_buff *skb, | 
|  | 1054 | u16 rxq_index, | 
|  | 1055 | u32 flow_id); | 
|  | 1056 | #endif | 
| Jiri Pirko | fbaec0e | 2011-02-13 10:15:37 +0000 | [diff] [blame] | 1057 | int			(*ndo_add_slave)(struct net_device *dev, | 
|  | 1058 | struct net_device *slave_dev); | 
|  | 1059 | int			(*ndo_del_slave)(struct net_device *dev, | 
|  | 1060 | struct net_device *slave_dev); | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1061 | netdev_features_t	(*ndo_fix_features)(struct net_device *dev, | 
|  | 1062 | netdev_features_t features); | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 1063 | int			(*ndo_set_features)(struct net_device *dev, | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1064 | netdev_features_t features); | 
| David Miller | da6a8fa | 2011-07-25 00:01:38 +0000 | [diff] [blame] | 1065 | int			(*ndo_neigh_construct)(struct neighbour *n); | 
| David S. Miller | 447f219 | 2011-12-19 15:04:41 -0500 | [diff] [blame] | 1066 | void			(*ndo_neigh_destroy)(struct neighbour *n); | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 1067 |  | 
|  | 1068 | int			(*ndo_fdb_add)(struct ndmsg *ndm, | 
| stephen hemminger | edc7d57 | 2012-10-01 12:32:33 +0000 | [diff] [blame] | 1069 | struct nlattr *tb[], | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 1070 | struct net_device *dev, | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 1071 | const unsigned char *addr, | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 1072 | u16 flags); | 
|  | 1073 | int			(*ndo_fdb_del)(struct ndmsg *ndm, | 
| Vlad Yasevich | 1690be6 | 2013-02-13 12:00:18 +0000 | [diff] [blame] | 1074 | struct nlattr *tb[], | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 1075 | struct net_device *dev, | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 1076 | const unsigned char *addr); | 
| John Fastabend | 7716202 | 2012-04-15 06:43:56 +0000 | [diff] [blame] | 1077 | int			(*ndo_fdb_dump)(struct sk_buff *skb, | 
|  | 1078 | struct netlink_callback *cb, | 
|  | 1079 | struct net_device *dev, | 
|  | 1080 | int idx); | 
| John Fastabend | e5a55a8 | 2012-10-24 08:12:57 +0000 | [diff] [blame] | 1081 |  | 
|  | 1082 | int			(*ndo_bridge_setlink)(struct net_device *dev, | 
|  | 1083 | struct nlmsghdr *nlh); | 
|  | 1084 | int			(*ndo_bridge_getlink)(struct sk_buff *skb, | 
|  | 1085 | u32 pid, u32 seq, | 
| Vlad Yasevich | 6cbdcee | 2013-02-13 12:00:13 +0000 | [diff] [blame] | 1086 | struct net_device *dev, | 
|  | 1087 | u32 filter_mask); | 
| Vlad Yasevich | 407af32 | 2013-02-13 12:00:12 +0000 | [diff] [blame] | 1088 | int			(*ndo_bridge_dellink)(struct net_device *dev, | 
|  | 1089 | struct nlmsghdr *nlh); | 
| Jiri Pirko | 4bf84c3 | 2012-12-27 23:49:37 +0000 | [diff] [blame] | 1090 | int			(*ndo_change_carrier)(struct net_device *dev, | 
|  | 1091 | bool new_carrier); | 
| Jiri Pirko | 66b52b0 | 2013-07-29 18:16:49 +0200 | [diff] [blame] | 1092 | int			(*ndo_get_phys_port_id)(struct net_device *dev, | 
|  | 1093 | struct netdev_phys_port_id *ppid); | 
| Joseph Gasparakis | 53cf5275 | 2013-09-04 02:13:38 -0700 | [diff] [blame] | 1094 | void			(*ndo_add_vxlan_port)(struct  net_device *dev, | 
|  | 1095 | sa_family_t sa_family, | 
| Joseph Gasparakis | 35e4237 | 2013-09-13 07:34:13 -0700 | [diff] [blame] | 1096 | __be16 port); | 
| Joseph Gasparakis | 53cf5275 | 2013-09-04 02:13:38 -0700 | [diff] [blame] | 1097 | void			(*ndo_del_vxlan_port)(struct  net_device *dev, | 
|  | 1098 | sa_family_t sa_family, | 
| Joseph Gasparakis | 35e4237 | 2013-09-13 07:34:13 -0700 | [diff] [blame] | 1099 | __be16 port); | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1100 | }; | 
|  | 1101 |  | 
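For orientation only: a driver normally instantiates this table statically and points dev->netdev_ops at it before registration. The sketch below is hypothetical (the "foo" names and the trivial transmit path are assumptions, not part of this header); only the hooks the device actually needs are filled in, everything else stays NULL and the core falls back to its defaults. The eth_* helpers come from <linux/etherdevice.h>.

static int foo_open(struct net_device *dev)
{
	netif_start_queue(dev);		/* let the stack call ndo_start_xmit */
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hand the skb to hardware here; this sketch simply consumes it */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};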
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1102 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | *	The DEVICE structure. | 
|  | 1104 | *	Actually, this whole structure is a big mistake.  It mixes I/O | 
|  | 1105 | *	data with strictly "high-level" data, and it has to know about | 
|  | 1106 | *	almost every data structure used in the INET module. | 
|  | 1107 | * | 
|  | 1108 | *	FIXME: cleanup struct net_device such that network protocol info | 
|  | 1109 | *	moves out. | 
|  | 1110 | */ | 
|  | 1111 |  | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 1112 | struct net_device { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1113 |  | 
|  | 1114 | /* | 
|  | 1115 | * This is the first field of the "visible" part of this structure | 
|  | 1116 | * (i.e. as seen by users in the "Space.c" file).  It is the name | 
| Justin P. Mattock | 724df61 | 2010-05-26 09:22:40 -0700 | [diff] [blame] | 1117 | * of the interface. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | */ | 
|  | 1119 | char			name[IFNAMSIZ]; | 
| Mark Gross | ed77134 | 2010-05-06 01:59:26 +0200 | [diff] [blame] | 1120 |  | 
| Eric Dumazet | 9136461 | 2012-06-11 06:36:13 +0000 | [diff] [blame] | 1121 | /* device name hash chain, please keep it close to name[] */ | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1122 | struct hlist_node	name_hlist; | 
| Eric Dumazet | 9136461 | 2012-06-11 06:36:13 +0000 | [diff] [blame] | 1123 |  | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 1124 | /* snmp alias */ | 
|  | 1125 | char 			*ifalias; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 |  | 
|  | 1127 | /* | 
|  | 1128 | *	I/O specific fields | 
|  | 1129 | *	FIXME: Merge these and struct ifmap into one | 
|  | 1130 | */ | 
|  | 1131 | unsigned long		mem_end;	/* shared mem end	*/ | 
|  | 1132 | unsigned long		mem_start;	/* shared mem start	*/ | 
|  | 1133 | unsigned long		base_addr;	/* device I/O address	*/ | 
|  | 1134 | unsigned int		irq;		/* device IRQ number	*/ | 
|  | 1135 |  | 
|  | 1136 | /* | 
|  | 1137 | *	Some hardware also needs these fields, but they are not | 
|  | 1138 | *	part of the usual set specified in Space.c. | 
|  | 1139 | */ | 
|  | 1140 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | unsigned long		state; | 
|  | 1142 |  | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1143 | struct list_head	dev_list; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1144 | struct list_head	napi_list; | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 1145 | struct list_head	unreg_list; | 
| Eric Dumazet | 4c3d5e7 | 2013-03-30 06:31:03 +0000 | [diff] [blame] | 1146 | struct list_head	upper_dev_list; /* List of upper devices */ | 
| Veaceslav Falico | 5d26191 | 2013-08-28 23:25:05 +0200 | [diff] [blame] | 1147 | struct list_head	lower_dev_list; | 
| Eric Dumazet | 4c3d5e7 | 2013-03-30 06:31:03 +0000 | [diff] [blame] | 1148 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 |  | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 1150 | /* currently active device features */ | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1151 | netdev_features_t	features; | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 1152 | /* user-changeable features */ | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1153 | netdev_features_t	hw_features; | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 1154 | /* user-requested features */ | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1155 | netdev_features_t	wanted_features; | 
| Michał Mirosław | 1aac626 | 2011-04-12 04:07:39 +0000 | [diff] [blame] | 1156 | /* mask of features inheritable by VLAN devices */ | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1157 | netdev_features_t	vlan_features; | 
| Joseph Gasparakis | 6a674e9 | 2012-12-07 14:14:14 +0000 | [diff] [blame] | 1158 | /* mask of features inherited by encapsulating devices | 
|  | 1159 | * This field indicates what encapsulation offloads | 
|  | 1160 | * the hardware is capable of doing, and drivers will | 
|  | 1161 | * need to set them appropriately. | 
|  | 1162 | */ | 
|  | 1163 | netdev_features_t	hw_enc_features; | 
| Simon Horman | 0d89d20 | 2013-05-23 21:02:52 +0000 | [diff] [blame] | 1164 | /* mask of features inheritable by MPLS */ | 
|  | 1165 | netdev_features_t	mpls_features; | 
| Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 1166 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1167 | /* Interface index. Unique device identifier	*/ | 
|  | 1168 | int			ifindex; | 
|  | 1169 | int			iflink; | 
|  | 1170 |  | 
| Rusty Russell | c45d286 | 2007-03-28 14:29:08 -0700 | [diff] [blame] | 1171 | struct net_device_stats	stats; | 
| Eric Dumazet | caf586e | 2010-09-30 21:06:55 +0000 | [diff] [blame] | 1172 | atomic_long_t		rx_dropped; /* dropped packets by core network | 
|  | 1173 | * Do not use this in drivers. | 
|  | 1174 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 |  | 
| Johannes Berg | b86e028 | 2007-04-26 20:48:23 -0700 | [diff] [blame] | 1176 | #ifdef CONFIG_WIRELESS_EXT | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | /* List of functions to handle Wireless Extensions (instead of ioctl). | 
|  | 1178 | * See <net/iw_handler.h> for details. Jean II */ | 
|  | 1179 | const struct iw_handler_def *	wireless_handlers; | 
|  | 1180 | /* Instance data managed by the core of Wireless Extensions. */ | 
|  | 1181 | struct iw_public_data *	wireless_data; | 
| Johannes Berg | b86e028 | 2007-04-26 20:48:23 -0700 | [diff] [blame] | 1182 | #endif | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1183 | /* Management operations */ | 
|  | 1184 | const struct net_device_ops *netdev_ops; | 
| Stephen Hemminger | 76fd859 | 2006-09-08 11:16:13 -0700 | [diff] [blame] | 1185 | const struct ethtool_ops *ethtool_ops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 |  | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1187 | /* Hardware header description */ | 
|  | 1188 | const struct header_ops *header_ops; | 
|  | 1189 |  | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 1190 | unsigned int		flags;	/* interface flags (a la BSD)	*/ | 
| Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 1191 | unsigned int		priv_flags; /* Like 'flags' but invisible to userspace. | 
|  | 1192 | * See if.h for definitions. */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 | unsigned short		gflags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | unsigned short		padded;	/* How much padding added by alloc_netdev() */ | 
|  | 1195 |  | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 1196 | unsigned char		operstate; /* RFC2863 operstate */ | 
|  | 1197 | unsigned char		link_mode; /* mapping policy to operstate */ | 
|  | 1198 |  | 
| Joe Perches | bdc220d | 2011-05-09 17:42:46 +0000 | [diff] [blame] | 1199 | unsigned char		if_port;	/* Selectable AUI, TP,..*/ | 
|  | 1200 | unsigned char		dma;		/* DMA channel		*/ | 
|  | 1201 |  | 
| David S. Miller | cd7b539 | 2010-05-02 22:27:59 -0700 | [diff] [blame] | 1202 | unsigned int		mtu;	/* interface MTU value		*/ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | unsigned short		type;	/* interface hardware type	*/ | 
|  | 1204 | unsigned short		hard_header_len;	/* hardware hdr length	*/ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 |  | 
| Johannes Berg | f5184d2 | 2008-05-12 20:48:31 -0700 | [diff] [blame] | 1206 | /* extra head- and tailroom the hardware may need, but not in all cases | 
|  | 1207 | * can this be guaranteed, especially tailroom. Some cases also use | 
|  | 1208 | * LL_MAX_HEADER instead to allocate the skb. | 
|  | 1209 | */ | 
|  | 1210 | unsigned short		needed_headroom; | 
|  | 1211 | unsigned short		needed_tailroom; | 
|  | 1212 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1213 | /* Interface address info. */ | 
| Jon Wetzel | a6f9a70 | 2005-08-20 17:15:54 -0700 | [diff] [blame] | 1214 | unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 
| Stefan Assmann | c1f7942 | 2010-07-22 02:50:21 +0000 | [diff] [blame] | 1215 | unsigned char		addr_assign_type; /* hw address assignment type */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | unsigned char		addr_len;	/* hardware address length	*/ | 
| David Miller | 596b9b6 | 2011-07-25 00:01:25 +0000 | [diff] [blame] | 1217 | unsigned char		neigh_priv_len; | 
| Narendra K | dffebd2 | 2013-06-10 19:34:03 +0530 | [diff] [blame] | 1218 | unsigned short          dev_id;		/* Used to differentiate devices | 
|  | 1219 | * that share the same link | 
|  | 1220 | * layer address | 
|  | 1221 | */ | 
| Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1222 | spinlock_t		addr_list_lock; | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1223 | struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */ | 
|  | 1224 | struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */ | 
| Eric Dumazet | 4c3d5e7 | 2013-03-30 06:31:03 +0000 | [diff] [blame] | 1225 | struct netdev_hw_addr_list	dev_addrs; /* list of device | 
|  | 1226 | * hw addresses | 
|  | 1227 | */ | 
|  | 1228 | #ifdef CONFIG_SYSFS | 
|  | 1229 | struct kset		*queues_kset; | 
|  | 1230 | #endif | 
|  | 1231 |  | 
| Joe Perches | 2d348d1 | 2011-07-25 16:17:35 -0700 | [diff] [blame] | 1232 | bool			uc_promisc; | 
| Wang Chen | 9d45abe | 2008-06-17 21:12:48 -0700 | [diff] [blame] | 1233 | unsigned int		promiscuity; | 
|  | 1234 | unsigned int		allmulti; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 |  | 
|  | 1237 | /* Protocol specific pointers */ | 
| Jesse Gross | 65ac6a5 | 2010-10-20 13:56:05 +0000 | [diff] [blame] | 1238 |  | 
| Ben Hutchings | d11ead7 | 2011-11-25 14:40:26 +0000 | [diff] [blame] | 1239 | #if IS_ENABLED(CONFIG_VLAN_8021Q) | 
| Jiri Pirko | 5b9ea6e | 2011-12-08 04:11:18 +0000 | [diff] [blame] | 1240 | struct vlan_info __rcu	*vlan_info;	/* VLAN info */ | 
| Jesse Gross | 65ac6a5 | 2010-10-20 13:56:05 +0000 | [diff] [blame] | 1241 | #endif | 
| Ben Hutchings | 34a430d | 2011-11-25 14:38:38 +0000 | [diff] [blame] | 1242 | #if IS_ENABLED(CONFIG_NET_DSA) | 
| Ben Hutchings | cf50dcc | 2011-11-25 14:32:52 +0000 | [diff] [blame] | 1243 | struct dsa_switch_tree	*dsa_ptr;	/* dsa specific data */ | 
| Lennert Buytenhek | 91da11f | 2008-10-07 13:44:02 +0000 | [diff] [blame] | 1244 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | void 			*atalk_ptr;	/* AppleTalk link 	*/ | 
| Eric Dumazet | 95ae6b2 | 2010-09-15 04:04:31 +0000 | [diff] [blame] | 1246 | struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/ | 
| Eric Dumazet | fc766e4c | 2010-10-29 03:09:24 +0000 | [diff] [blame] | 1247 | struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */ | 
| Eric Dumazet | 198caec | 2010-10-24 21:32:05 +0000 | [diff] [blame] | 1248 | struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | void			*ax25_ptr;	/* AX.25 specific data */ | 
| Johannes Berg | 704232c | 2007-04-23 12:20:05 -0700 | [diff] [blame] | 1250 | struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data, | 
|  | 1251 | assign before registering */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1253 | /* | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1254 | * Cache lines mostly used on receive path (including eth_type_trans()) | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1255 | */ | 
| Eric Dumazet | 4dc8913 | 2010-08-31 07:40:16 +0000 | [diff] [blame] | 1256 | unsigned long		last_rx;	/* Time of last Rx | 
|  | 1257 | * This should not be set in | 
|  | 1258 | * drivers, unless really needed, | 
|  | 1259 | * because the network stack (bonding) | 
|  | 1260 | * uses it if/when necessary, to | 
|  | 1261 | * avoid dirtying this cache line. | 
|  | 1262 | */ | 
|  | 1263 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1264 | /* Interface address info used in eth_type_trans() */ | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 1265 | unsigned char		*dev_addr;	/* hw address, (before bcast | 
|  | 1266 | because most packets are | 
|  | 1267 | unicast) */ | 
|  | 1268 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1269 |  | 
| david decotigny | ccf5ff6 | 2011-11-16 12:15:10 +0000 | [diff] [blame] | 1270 | #ifdef CONFIG_RPS | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1271 | struct netdev_rx_queue	*_rx; | 
|  | 1272 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1273 | /* Number of RX queues allocated at register_netdev() time */ | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1274 | unsigned int		num_rx_queues; | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 1275 |  | 
|  | 1276 | /* Number of RX queues currently active in device */ | 
|  | 1277 | unsigned int		real_num_rx_queues; | 
| Ben Hutchings | c445477 | 2011-01-19 11:03:53 +0000 | [diff] [blame] | 1278 |  | 
| Eric Dumazet | df33454 | 2010-03-24 19:13:54 +0000 | [diff] [blame] | 1279 | #endif | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1280 |  | 
| stephen hemminger | 61391cd | 2010-11-15 06:38:12 +0000 | [diff] [blame] | 1281 | rx_handler_func_t __rcu	*rx_handler; | 
|  | 1282 | void __rcu		*rx_handler_data; | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1283 |  | 
| Eric Dumazet | 24824a0 | 2010-10-02 06:11:55 +0000 | [diff] [blame] | 1284 | struct netdev_queue __rcu *ingress_queue; | 
| Eric Dumazet | 4c3d5e7 | 2013-03-30 06:31:03 +0000 | [diff] [blame] | 1285 | unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/ | 
|  | 1286 |  | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1287 |  | 
|  | 1288 | /* | 
|  | 1289 | * Cache lines mostly used on transmit path | 
|  | 1290 | */ | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1291 | struct netdev_queue	*_tx ____cacheline_aligned_in_smp; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1292 |  | 
|  | 1293 | /* Number of TX queues allocated at alloc_netdev_mq() time  */ | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1294 | unsigned int		num_tx_queues; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1295 |  | 
|  | 1296 | /* Number of TX queues currently active in device  */ | 
|  | 1297 | unsigned int		real_num_tx_queues; | 
|  | 1298 |  | 
| Patrick McHardy | af356af | 2009-09-04 06:41:18 +0000 | [diff] [blame] | 1299 | /* root qdisc from userspace point of view */ | 
|  | 1300 | struct Qdisc		*qdisc; | 
|  | 1301 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | unsigned long		tx_queue_len;	/* Max frames per queue allowed */ | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 1303 | spinlock_t		tx_global_lock; | 
| Eric Dumazet | cd13539 | 2010-09-16 02:58:13 +0000 | [diff] [blame] | 1304 |  | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1305 | #ifdef CONFIG_XPS | 
| Eric Dumazet | a417786 | 2010-11-28 21:43:02 +0000 | [diff] [blame] | 1306 | struct xps_dev_maps __rcu *xps_maps; | 
| Tom Herbert | bf26414 | 2010-11-26 08:36:09 +0000 | [diff] [blame] | 1307 | #endif | 
| Eric Dumazet | 4c3d5e7 | 2013-03-30 06:31:03 +0000 | [diff] [blame] | 1308 | #ifdef CONFIG_RFS_ACCEL | 
|  | 1309 | /* CPU reverse-mapping for RX completion interrupts, indexed | 
|  | 1310 | * by RX queue number.  Assigned by driver.  This must only be | 
|  | 1311 | * set if the ndo_rx_flow_steer operation is defined. */ | 
|  | 1312 | struct cpu_rmap		*rx_cpu_rmap; | 
|  | 1313 | #endif | 
| Tom Herbert | 1d24eb4 | 2010-11-21 13:17:27 +0000 | [diff] [blame] | 1314 |  | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1315 | /* These may be needed for future network-power-down code. */ | 
| Eric Dumazet | 9d21493 | 2009-05-17 20:55:16 -0700 | [diff] [blame] | 1316 |  | 
|  | 1317 | /* | 
|  | 1318 | * trans_start here is expensive for high speed devices on SMP, | 
|  | 1319 | * please use netdev_queue->trans_start instead. | 
|  | 1320 | */ | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1321 | unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/ | 
|  | 1322 |  | 
|  | 1323 | int			watchdog_timeo; /* used by dev_watchdog() */ | 
|  | 1324 | struct timer_list	watchdog_timer; | 
|  | 1325 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | /* Number of references to this device */ | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 1327 | int __percpu		*pcpu_refcnt; | 
| Eric Dumazet | 9356b8f | 2005-09-27 15:23:16 -0700 | [diff] [blame] | 1328 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | /* delayed register/unregister */ | 
|  | 1330 | struct list_head	todo_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | /* device index hash chain */ | 
|  | 1332 | struct hlist_node	index_hlist; | 
|  | 1333 |  | 
| Eric Dumazet | e014deb | 2009-11-17 05:59:21 +0000 | [diff] [blame] | 1334 | struct list_head	link_watch_list; | 
| Herbert Xu | 572a103 | 2007-05-08 18:34:17 -0700 | [diff] [blame] | 1335 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | /* register/unregister state machine */ | 
|  | 1337 | enum { NETREG_UNINITIALIZED=0, | 
| Stephen Hemminger | b17a7c1 | 2006-05-10 13:21:17 -0700 | [diff] [blame] | 1338 | NETREG_REGISTERED,	/* completed register_netdevice */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1339 | NETREG_UNREGISTERING,	/* called unregister_netdevice */ | 
|  | 1340 | NETREG_UNREGISTERED,	/* completed unregister todo */ | 
|  | 1341 | NETREG_RELEASED,		/* called free_netdev */ | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 1342 | NETREG_DUMMY,		/* dummy device for NAPI poll */ | 
| Eric Dumazet | 449f454 | 2011-05-19 12:24:16 +0000 | [diff] [blame] | 1343 | } reg_state:8; | 
|  | 1344 |  | 
|  | 1345 | bool dismantle; /* device is going to be freed */ | 
| Patrick McHardy | a283576 | 2010-02-26 06:34:51 +0000 | [diff] [blame] | 1346 |  | 
|  | 1347 | enum { | 
|  | 1348 | RTNL_LINK_INITIALIZED, | 
|  | 1349 | RTNL_LINK_INITIALIZING, | 
|  | 1350 | } rtnl_link_state:16; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 |  | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1352 | /* Called from unregister, can be used to call free_netdev */ | 
|  | 1353 | void (*destructor)(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | #ifdef CONFIG_NETPOLL | 
| Cong Wang | 5fbee84 | 2013-01-22 21:29:39 +0000 | [diff] [blame] | 1356 | struct netpoll_info __rcu	*npinfo; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | #endif | 
| David S. Miller | eae792b | 2008-07-15 03:03:33 -0700 | [diff] [blame] | 1358 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1359 | #ifdef CONFIG_NET_NS | 
| Eric W. Biederman | 4a1c537 | 2007-09-12 11:56:32 +0200 | [diff] [blame] | 1360 | /* Network namespace this network device is inside */ | 
|  | 1361 | struct net		*nd_net; | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1362 | #endif | 
| Eric W. Biederman | 4a1c537 | 2007-09-12 11:56:32 +0200 | [diff] [blame] | 1363 |  | 
| David S. Miller | 4951704 | 2008-05-12 03:29:11 -0700 | [diff] [blame] | 1364 | /* mid-layer private */ | 
| Eric Dumazet | a7855c7 | 2010-09-23 23:51:51 +0000 | [diff] [blame] | 1365 | union { | 
|  | 1366 | void				*ml_priv; | 
|  | 1367 | struct pcpu_lstats __percpu	*lstats; /* loopback stats */ | 
| Eric Dumazet | 290b895 | 2010-09-27 00:33:35 +0000 | [diff] [blame] | 1368 | struct pcpu_tstats __percpu	*tstats; /* tunnel stats */ | 
| Eric Dumazet | 6d81f41 | 2010-09-27 20:50:33 +0000 | [diff] [blame] | 1369 | struct pcpu_dstats __percpu	*dstats; /* dummy stats */ | 
| Eric Dumazet | 2681128 | 2012-12-29 16:02:43 +0000 | [diff] [blame] | 1370 | struct pcpu_vstats __percpu	*vstats; /* veth stats */ | 
| Eric Dumazet | a7855c7 | 2010-09-23 23:51:51 +0000 | [diff] [blame] | 1371 | }; | 
| Patrick McHardy | eca9eba | 2008-07-05 21:26:13 -0700 | [diff] [blame] | 1372 | /* GARP */ | 
| Eric Dumazet | 3cc77ec | 2010-10-24 21:32:36 +0000 | [diff] [blame] | 1373 | struct garp_port __rcu	*garp_port; | 
| David Ward | febf018 | 2013-02-08 17:17:06 +0000 | [diff] [blame] | 1374 | /* MRP */ | 
|  | 1375 | struct mrp_port __rcu	*mrp_port; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | /* class/net/name entry */ | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1378 | struct device		dev; | 
| Eric W. Biederman | 0c509a6 | 2009-10-29 14:18:21 +0000 | [diff] [blame] | 1379 | /* space for optional device, statistics, and wireless sysfs groups */ | 
|  | 1380 | const struct attribute_group *sysfs_groups[4]; | 
| Patrick McHardy | 38f7b87 | 2007-06-13 12:03:51 -0700 | [diff] [blame] | 1381 |  | 
|  | 1382 | /* rtnetlink link ops */ | 
|  | 1383 | const struct rtnl_link_ops *rtnl_link_ops; | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 1384 |  | 
| Peter P Waskiewicz Jr | 82cc1a7 | 2008-03-21 03:43:19 -0700 | [diff] [blame] | 1385 | /* for setting kernel sock attribute on TCP connection setup */ | 
|  | 1386 | #define GSO_MAX_SIZE		65536 | 
|  | 1387 | unsigned int		gso_max_size; | 
| Ben Hutchings | 30b678d | 2012-07-30 15:57:00 +0000 | [diff] [blame] | 1388 | #define GSO_MAX_SEGS		65535 | 
|  | 1389 | u16			gso_max_segs; | 
| Stephen Hemminger | d314774 | 2008-11-19 21:32:24 -0800 | [diff] [blame] | 1390 |  | 
| Jeff Kirsher | 7a6b6f5 | 2008-11-25 01:02:08 -0800 | [diff] [blame] | 1391 | #ifdef CONFIG_DCB | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 1392 | /* Data Center Bridging netlink ops */ | 
| Stephen Hemminger | 3295354 | 2009-10-05 06:01:03 +0000 | [diff] [blame] | 1393 | const struct dcbnl_rtnl_ops *dcbnl_ops; | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 1394 | #endif | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1395 | u8 num_tc; | 
|  | 1396 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | 
|  | 1397 | u8 prio_tc_map[TC_BITMASK + 1]; | 
| Alexander Duyck | 2f90b86 | 2008-11-20 20:52:10 -0800 | [diff] [blame] | 1398 |  | 
| Ben Hutchings | d11ead7 | 2011-11-25 14:40:26 +0000 | [diff] [blame] | 1399 | #if IS_ENABLED(CONFIG_FCOE) | 
| Yi Zou | 4d288d5 | 2009-02-27 14:06:59 -0800 | [diff] [blame] | 1400 | /* max exchange id for FCoE LRO by ddp */ | 
|  | 1401 | unsigned int		fcoe_ddp_xid; | 
|  | 1402 | #endif | 
| Neil Horman | 5bc1421 | 2011-11-22 05:10:51 +0000 | [diff] [blame] | 1403 | #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) | 
|  | 1404 | struct netprio_map __rcu *priomap; | 
|  | 1405 | #endif | 
| Richard Cochran | c1f19b5 | 2010-07-17 08:49:36 +0000 | [diff] [blame] | 1406 | /* phy device may attach itself for hardware timestamping */ | 
|  | 1407 | struct phy_device *phydev; | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 1408 |  | 
| Eric Dumazet | 23d3b8b | 2012-09-05 01:02:56 +0000 | [diff] [blame] | 1409 | struct lock_class_key *qdisc_tx_busylock; | 
|  | 1410 |  | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 1411 | /* group the device belongs to */ | 
|  | 1412 | int group; | 
| Eric Dumazet | 9136461 | 2012-06-11 06:36:13 +0000 | [diff] [blame] | 1413 |  | 
|  | 1414 | struct pm_qos_request	pm_qos_req; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | }; | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1416 | #define to_net_dev(d) container_of(d, struct net_device, dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 |  | 
|  | 1418 | #define	NETDEV_ALIGN		32 | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1419 |  | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1420 | static inline | 
| John Fastabend | 4f57c08 | 2011-01-17 08:06:04 +0000 | [diff] [blame] | 1421 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | 
|  | 1422 | { | 
|  | 1423 | return dev->prio_tc_map[prio & TC_BITMASK]; | 
|  | 1424 | } | 
|  | 1425 |  | 
|  | 1426 | static inline | 
|  | 1427 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | 
|  | 1428 | { | 
|  | 1429 | if (tc >= dev->num_tc) | 
|  | 1430 | return -EINVAL; | 
|  | 1431 |  | 
|  | 1432 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | 
|  | 1433 | return 0; | 
|  | 1434 | } | 
|  | 1435 |  | 
|  | 1436 | static inline | 
|  | 1437 | void netdev_reset_tc(struct net_device *dev) | 
|  | 1438 | { | 
|  | 1439 | dev->num_tc = 0; | 
|  | 1440 | memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); | 
|  | 1441 | memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); | 
|  | 1442 | } | 
|  | 1443 |  | 
|  | 1444 | static inline | 
|  | 1445 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) | 
|  | 1446 | { | 
|  | 1447 | if (tc >= dev->num_tc) | 
|  | 1448 | return -EINVAL; | 
|  | 1449 |  | 
|  | 1450 | dev->tc_to_txq[tc].count = count; | 
|  | 1451 | dev->tc_to_txq[tc].offset = offset; | 
|  | 1452 | return 0; | 
|  | 1453 | } | 
|  | 1454 |  | 
|  | 1455 | static inline | 
|  | 1456 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc) | 
|  | 1457 | { | 
|  | 1458 | if (num_tc > TC_MAX_QUEUE) | 
|  | 1459 | return -EINVAL; | 
|  | 1460 |  | 
|  | 1461 | dev->num_tc = num_tc; | 
|  | 1462 | return 0; | 
|  | 1463 | } | 
|  | 1464 |  | 
|  | 1465 | static inline | 
|  | 1466 | int netdev_get_num_tc(struct net_device *dev) | 
|  | 1467 | { | 
|  | 1468 | return dev->num_tc; | 
|  | 1469 | } | 
|  | 1470 |  | 
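The traffic-class helpers above are what an ndo_setup_tc() implementation typically calls. A hedged sketch, assuming a hypothetical driver whose active TX queues divide evenly into 'tc' classes and whose priorities map 1:1 onto classes:

static int foo_setup_tc(struct net_device *dev, u8 tc)
{
	u16 queues_per_tc;
	u8 i;

	if (!tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	if (netdev_set_num_tc(dev, tc))
		return -EINVAL;

	/* give each class a contiguous block of TX queues */
	queues_per_tc = dev->real_num_tx_queues / tc;
	for (i = 0; i < tc; i++) {
		netdev_set_tc_queue(dev, i, queues_per_tc, i * queues_per_tc);
		netdev_set_prio_tc_map(dev, i, i);
	}
	return 0;
}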
|  | 1471 | static inline | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1472 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | 
|  | 1473 | unsigned int index) | 
|  | 1474 | { | 
|  | 1475 | return &dev->_tx[index]; | 
|  | 1476 | } | 
|  | 1477 |  | 
|  | 1478 | static inline void netdev_for_each_tx_queue(struct net_device *dev, | 
|  | 1479 | void (*f)(struct net_device *, | 
|  | 1480 | struct netdev_queue *, | 
|  | 1481 | void *), | 
|  | 1482 | void *arg) | 
|  | 1483 | { | 
|  | 1484 | unsigned int i; | 
|  | 1485 |  | 
|  | 1486 | for (i = 0; i < dev->num_tx_queues; i++) | 
|  | 1487 | f(dev, &dev->_tx[i], arg); | 
|  | 1488 | } | 
|  | 1489 |  | 
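As an illustration of the iterator above (a sketch, not from this header; the "foo" names are assumptions): the callback receives each struct netdev_queue plus the opaque arg, here used to refresh every queue's watchdog timestamp.

static void foo_refresh_queue(struct net_device *dev,
			      struct netdev_queue *txq, void *arg)
{
	txq->trans_start = jiffies;	/* keep dev_watchdog() from firing */
}

static void foo_refresh_all_queues(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, foo_refresh_queue, NULL);
}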
| Amerigo Wang | 8c4c49d | 2012-09-17 20:16:31 +0000 | [diff] [blame] | 1490 | extern struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 
|  | 1491 | struct sk_buff *skb); | 
| Alexander Duyck | 416186f | 2013-01-10 08:56:51 +0000 | [diff] [blame] | 1492 | extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); | 
| Amerigo Wang | 8c4c49d | 2012-09-17 20:16:31 +0000 | [diff] [blame] | 1493 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1494 | /* | 
|  | 1495 | * Net namespace inlines | 
|  | 1496 | */ | 
|  | 1497 | static inline | 
|  | 1498 | struct net *dev_net(const struct net_device *dev) | 
|  | 1499 | { | 
| Eric Dumazet | c2d9ba9 | 2010-06-01 06:51:19 +0000 | [diff] [blame] | 1500 | return read_pnet(&dev->nd_net); | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1501 | } | 
|  | 1502 |  | 
|  | 1503 | static inline | 
| Denis V. Lunev | f5aa23f | 2008-03-26 00:48:17 -0700 | [diff] [blame] | 1504 | void dev_net_set(struct net_device *dev, struct net *net) | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1505 | { | 
|  | 1506 | #ifdef CONFIG_NET_NS | 
| Denis V. Lunev | f3005d7 | 2008-04-16 02:02:18 -0700 | [diff] [blame] | 1507 | release_net(dev->nd_net); | 
|  | 1508 | dev->nd_net = hold_net(net); | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1509 | #endif | 
|  | 1510 | } | 
|  | 1511 |  | 
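A small, hypothetical usage sketch (net_eq() and init_net come from <net/net_namespace.h>, which this header already includes):

static bool foo_in_init_net(const struct net_device *dev)
{
	/* true when the device lives in the initial network namespace */
	return net_eq(dev_net(dev), &init_net);
}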
| Lennert Buytenhek | cf85d08 | 2008-10-07 13:45:02 +0000 | [diff] [blame] | 1512 | static inline bool netdev_uses_dsa_tags(struct net_device *dev) | 
|  | 1513 | { | 
|  | 1514 | #ifdef CONFIG_NET_DSA_TAG_DSA | 
|  | 1515 | if (dev->dsa_ptr != NULL) | 
|  | 1516 | return dsa_uses_dsa_tags(dev->dsa_ptr); | 
|  | 1517 | #endif | 
|  | 1518 |  | 
|  | 1519 | return 0; | 
|  | 1520 | } | 
|  | 1521 |  | 
| Lennert Buytenhek | 396138f0 | 2008-10-07 13:46:07 +0000 | [diff] [blame] | 1522 | static inline bool netdev_uses_trailer_tags(struct net_device *dev) | 
|  | 1523 | { | 
|  | 1524 | #ifdef CONFIG_NET_DSA_TAG_TRAILER | 
|  | 1525 | if (dev->dsa_ptr != NULL) | 
|  | 1526 | return dsa_uses_trailer_tags(dev->dsa_ptr); | 
|  | 1527 | #endif | 
|  | 1528 |  | 
|  | 1529 | return 0; | 
|  | 1530 | } | 
|  | 1531 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1532 | /** | 
|  | 1533 | *	netdev_priv - access network device private data | 
|  | 1534 | *	@dev: network device | 
|  | 1535 | * | 
|  | 1536 | * Get network device private data | 
|  | 1537 | */ | 
| Patrick McHardy | 6472ce6 | 2007-06-13 12:03:21 -0700 | [diff] [blame] | 1538 | static inline void *netdev_priv(const struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 | { | 
| Eric Dumazet | 1ce8e7b | 2009-05-27 04:42:37 +0000 | [diff] [blame] | 1540 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1541 | } | 
|  | 1542 |  | 
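The private area returned by netdev_priv() is the extra space requested from alloc_netdev()/alloc_etherdev() at allocation time. A hedged sketch with a hypothetical foo_priv structure:

struct foo_priv {
	spinlock_t	lock;
	u32		rx_errors;
};

/* allocation, e.g. in probe: alloc_etherdev(sizeof(struct foo_priv)) */

static void foo_count_rx_error(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->rx_errors++;
	spin_unlock(&priv->lock);
}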
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | /* Set the sysfs physical device reference for the network logical device | 
|  | 1544 | * if set prior to registration, it will cause a symlink during initialization. | 
|  | 1545 | */ | 
| Greg Kroah-Hartman | 43cb76d | 2002-04-09 12:14:34 -0700 | [diff] [blame] | 1546 | #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 |  | 
| Marcel Holtmann | 384912e | 2009-08-31 21:08:19 +0000 | [diff] [blame] | 1548 | /* Set the sysfs device type for the network logical device to allow | 
|  | 1549 | * fine-grained identification of different network device types. For | 
|  | 1550 | * example Ethernet, Wireless LAN, Bluetooth, WiMAX, etc. | 
|  | 1551 | */ | 
|  | 1552 | #define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype)) | 
|  | 1553 |  | 
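For instance (a hypothetical PCI driver fragment; <linux/pci.h> is assumed, and register_netdev() is declared later in this header), the parent and type are set before registration so the sysfs entries appear correctly:

static const struct device_type foo_devtype = {
	.name = "foo",
};

static int foo_attach(struct pci_dev *pdev, struct net_device *netdev)
{
	SET_NETDEV_DEV(netdev, &pdev->dev);	/* parent for the sysfs "device" symlink */
	SET_NETDEV_DEVTYPE(netdev, &foo_devtype);
	return register_netdev(netdev);
}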
| Eric Dumazet | 82dc3c6 | 2013-03-05 15:57:22 +0000 | [diff] [blame] | 1554 | /* Default NAPI poll() weight | 
|  | 1555 | * Device drivers are strongly advised to not use bigger value | 
|  | 1556 | */ | 
|  | 1557 | #define NAPI_POLL_WEIGHT 64 | 
|  | 1558 |  | 
| Stephen Hemminger | 3b582cc | 2007-11-01 02:21:47 -0700 | [diff] [blame] | 1559 | /** | 
|  | 1560 | *	netif_napi_add - initialize a napi context | 
|  | 1561 | *	@dev:  network device | 
|  | 1562 | *	@napi: napi context | 
|  | 1563 | *	@poll: polling function | 
|  | 1564 | *	@weight: default weight | 
|  | 1565 | * | 
|  | 1566 | * netif_napi_add() must be used to initialize a napi context prior to calling | 
|  | 1567 | * *any* of the other napi related functions. | 
|  | 1568 | */ | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1569 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, | 
|  | 1570 | int (*poll)(struct napi_struct *, int), int weight); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1571 |  | 
| Alexander Duyck | d815653 | 2008-07-08 15:13:05 -0700 | [diff] [blame] | 1572 | /** | 
|  | 1573 | *  netif_napi_del - remove a napi context | 
|  | 1574 | *  @napi: napi context | 
|  | 1575 | * | 
|  | 1576 | *  netif_napi_del() removes a napi context from the network device napi list | 
|  | 1577 | */ | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1578 | void netif_napi_del(struct napi_struct *napi); | 
|  | 1579 |  | 
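A hedged sketch of the usual pattern (the "bar" structure and the bar_rx_clean()/bar_enable_rx_irq() helpers are assumptions): the driver embeds a napi_struct in its private data, registers it with netif_napi_add(), and completes NAPI when a poll round does less work than its budget.

struct bar_priv {
	struct napi_struct	napi;
	struct net_device	*netdev;
};

static int bar_poll(struct napi_struct *napi, int budget)
{
	struct bar_priv *priv = container_of(napi, struct bar_priv, napi);
	int work_done = bar_rx_clean(priv, budget);	/* assumed RX helper */

	if (work_done < budget) {
		napi_complete(napi);
		bar_enable_rx_irq(priv);	/* assumed: re-arm the RX interrupt */
	}
	return work_done;
}

static void bar_napi_init(struct net_device *netdev, struct bar_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, bar_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
}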
|  | 1580 | struct napi_gro_cb { | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 1581 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ | 
|  | 1582 | void *frag0; | 
|  | 1583 |  | 
| Herbert Xu | 7489594 | 2009-05-26 18:50:27 +0000 | [diff] [blame] | 1584 | /* Length of frag0. */ | 
|  | 1585 | unsigned int frag0_len; | 
|  | 1586 |  | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1587 | /* This indicates where we are processing relative to skb->data. */ | 
|  | 1588 | int data_offset; | 
|  | 1589 |  | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1590 | /* This is non-zero if the packet cannot be merged with the new skb. */ | 
|  | 1591 | int flush; | 
|  | 1592 |  | 
|  | 1593 | /* Number of segments aggregated. */ | 
| Eric Dumazet | 2e71a6f | 2012-10-06 08:08:49 +0000 | [diff] [blame] | 1594 | u16	count; | 
|  | 1595 |  | 
|  | 1596 | /* This is non-zero if the packet may be of the same flow. */ | 
|  | 1597 | u8	same_flow; | 
| Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 1598 |  | 
|  | 1599 | /* Free the skb? */ | 
| Eric Dumazet | 2e71a6f | 2012-10-06 08:08:49 +0000 | [diff] [blame] | 1600 | u8	free; | 
| Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 1601 | #define NAPI_GRO_FREE		  1 | 
|  | 1602 | #define NAPI_GRO_FREE_STOLEN_HEAD 2 | 
| Eric Dumazet | 2e71a6f | 2012-10-06 08:08:49 +0000 | [diff] [blame] | 1603 |  | 
|  | 1604 | /* jiffies when first packet was created/queued */ | 
|  | 1605 | unsigned long age; | 
| Eric Dumazet | 8634724 | 2012-10-08 21:38:50 +0200 | [diff] [blame] | 1606 |  | 
|  | 1607 | /* Used in ipv6_gro_receive() */ | 
|  | 1608 | int	proto; | 
| Eric Dumazet | c3c7c25 | 2012-12-06 13:54:59 +0000 | [diff] [blame] | 1609 |  | 
|  | 1610 | /* used in skb_gro_receive() slow path */ | 
|  | 1611 | struct sk_buff *last; | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 1612 | }; | 
|  | 1613 |  | 
|  | 1614 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | 
| Alexander Duyck | d815653 | 2008-07-08 15:13:05 -0700 | [diff] [blame] | 1615 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | struct packet_type { | 
| David S. Miller | f2ccd8f | 2005-08-09 19:34:12 -0700 | [diff] [blame] | 1617 | __be16			type;	/* This is really htons(ether_type). */ | 
|  | 1618 | struct net_device	*dev;	/* NULL is wildcarded here	     */ | 
|  | 1619 | int			(*func) (struct sk_buff *, | 
|  | 1620 | struct net_device *, | 
|  | 1621 | struct packet_type *, | 
|  | 1622 | struct net_device *); | 
| Eric Leblond | c0de08d | 2012-08-16 22:02:58 +0000 | [diff] [blame] | 1623 | bool			(*id_match)(struct packet_type *ptype, | 
|  | 1624 | struct sock *sk); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1625 | void			*af_packet_priv; | 
|  | 1626 | struct list_head	list; | 
|  | 1627 | }; | 
|  | 1628 |  | 
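A hypothetical tap for IPv4 frames as a usage sketch (registration goes through dev_add_pack()/dev_remove_pack(), declared further down in this header; the handler owns the skb it is handed and must free or consume it):

static int foo_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	/* inspect the frame here; this sketch simply drops it */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type foo_ipv4_ptype = {
	.type = cpu_to_be16(ETH_P_IP),	/* ETH_P_IP from <linux/if_ether.h> */
	.func = foo_ipv4_rcv,
};

/* registration, e.g. at module init: dev_add_pack(&foo_ipv4_ptype); */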
| Vlad Yasevich | f191a1d | 2012-11-15 08:49:23 +0000 | [diff] [blame] | 1629 | struct offload_callbacks { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | struct sk_buff		*(*gso_segment)(struct sk_buff *skb, | 
|  | 1631 | netdev_features_t features); | 
|  | 1632 | int			(*gso_send_check)(struct sk_buff *skb); | 
|  | 1633 | struct sk_buff		**(*gro_receive)(struct sk_buff **head, | 
|  | 1634 | struct sk_buff *skb); | 
|  | 1635 | int			(*gro_complete)(struct sk_buff *skb); | 
| Vlad Yasevich | f191a1d | 2012-11-15 08:49:23 +0000 | [diff] [blame] | 1636 | }; | 
|  | 1637 |  | 
|  | 1638 | struct packet_offload { | 
|  | 1639 | __be16			 type;	/* This is really htons(ether_type). */ | 
|  | 1640 | struct offload_callbacks callbacks; | 
|  | 1641 | struct list_head	 list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 | }; | 
|  | 1643 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | #include <linux/notifier.h> | 
|  | 1645 |  | 
| Amerigo Wang | dcfe142 | 2011-07-25 17:13:09 -0700 | [diff] [blame] | 1646 | /* netdevice notifier chain. Please remember to update the rtnetlink | 
|  | 1647 | * notification exclusion list in rtnetlink_event() when adding new | 
|  | 1648 | * types. | 
|  | 1649 | */ | 
|  | 1650 | #define NETDEV_UP	0x0001	/* For now you can't veto a device up/down */ | 
|  | 1651 | #define NETDEV_DOWN	0x0002 | 
|  | 1652 | #define NETDEV_REBOOT	0x0003	/* Tell a protocol stack a network interface | 
|  | 1653 | detected a hardware crash and restarted | 
|  | 1654 | - we can use this e.g. to kick TCP sessions | 
|  | 1655 | once done */ | 
|  | 1656 | #define NETDEV_CHANGE	0x0004	/* Notify device state change */ | 
|  | 1657 | #define NETDEV_REGISTER 0x0005 | 
|  | 1658 | #define NETDEV_UNREGISTER	0x0006 | 
|  | 1659 | #define NETDEV_CHANGEMTU	0x0007 | 
|  | 1660 | #define NETDEV_CHANGEADDR	0x0008 | 
|  | 1661 | #define NETDEV_GOING_DOWN	0x0009 | 
|  | 1662 | #define NETDEV_CHANGENAME	0x000A | 
|  | 1663 | #define NETDEV_FEAT_CHANGE	0x000B | 
|  | 1664 | #define NETDEV_BONDING_FAILOVER 0x000C | 
|  | 1665 | #define NETDEV_PRE_UP		0x000D | 
|  | 1666 | #define NETDEV_PRE_TYPE_CHANGE	0x000E | 
|  | 1667 | #define NETDEV_POST_TYPE_CHANGE	0x000F | 
|  | 1668 | #define NETDEV_POST_INIT	0x0010 | 
| Eric Dumazet | 0115e8e | 2012-08-22 17:19:46 +0000 | [diff] [blame] | 1669 | #define NETDEV_UNREGISTER_FINAL 0x0011 | 
| Amerigo Wang | dcfe142 | 2011-07-25 17:13:09 -0700 | [diff] [blame] | 1670 | #define NETDEV_RELEASE		0x0012 | 
|  | 1671 | #define NETDEV_NOTIFY_PEERS	0x0013 | 
|  | 1672 | #define NETDEV_JOIN		0x0014 | 
| Jiri Pirko | 42e52bf | 2013-05-25 04:12:10 +0000 | [diff] [blame] | 1673 | #define NETDEV_CHANGEUPPER	0x0015 | 
| Jiri Pirko | 4aa5dee | 2013-07-20 12:13:53 +0200 | [diff] [blame] | 1674 | #define NETDEV_RESEND_IGMP	0x0016 | 
| Amerigo Wang | dcfe142 | 2011-07-25 17:13:09 -0700 | [diff] [blame] | 1675 |  | 
|  | 1676 | extern int register_netdevice_notifier(struct notifier_block *nb); | 
|  | 1677 | extern int unregister_netdevice_notifier(struct notifier_block *nb); | 
| Jiri Pirko | 351638e | 2013-05-28 01:30:21 +0000 | [diff] [blame] | 1678 |  | 
|  | 1679 | struct netdev_notifier_info { | 
|  | 1680 | struct net_device *dev; | 
|  | 1681 | }; | 
|  | 1682 |  | 
| Jiri Pirko | be9efd3 | 2013-05-28 01:30:22 +0000 | [diff] [blame] | 1683 | struct netdev_notifier_change_info { | 
|  | 1684 | struct netdev_notifier_info info; /* must be first */ | 
|  | 1685 | unsigned int flags_changed; | 
|  | 1686 | }; | 
|  | 1687 |  | 
| Cong Wang | 75538c2 | 2013-05-29 11:30:50 +0800 | [diff] [blame] | 1688 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, | 
|  | 1689 | struct net_device *dev) | 
|  | 1690 | { | 
|  | 1691 | info->dev = dev; | 
|  | 1692 | } | 
|  | 1693 |  | 
| Jiri Pirko | 351638e | 2013-05-28 01:30:21 +0000 | [diff] [blame] | 1694 | static inline struct net_device * | 
|  | 1695 | netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) | 
|  | 1696 | { | 
|  | 1697 | return info->dev; | 
|  | 1698 | } | 
|  | 1699 |  | 
|  | 1700 | extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, | 
|  | 1701 | struct netdev_notifier_info *info); | 
| Amerigo Wang | dcfe142 | 2011-07-25 17:13:09 -0700 | [diff] [blame] | 1702 | extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); | 
|  | 1703 |  | 
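/*
 * Illustrative sketch (editor's note): a typical netdevice notifier.  The
 * callback is invoked with one of the NETDEV_* events above and a pointer to
 * a struct netdev_notifier_info, from which the device is recovered with
 * netdev_notifier_info_to_dev().  example_netdev_event() and example_nb are
 * hypothetical names.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */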
|  | 1704 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | extern rwlock_t				dev_base_lock;		/* Device list lock */ | 
|  | 1706 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1707 | #define for_each_netdev(net, d)		\ | 
|  | 1708 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | 
| Eric W. Biederman | dcbccbd4 | 2009-11-29 22:25:26 +0000 | [diff] [blame] | 1709 | #define for_each_netdev_reverse(net, d)	\ | 
|  | 1710 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | 
| Eric Dumazet | c6d14c8 | 2009-11-04 05:43:23 -0800 | [diff] [blame] | 1711 | #define for_each_netdev_rcu(net, d)		\ | 
|  | 1712 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1713 | #define for_each_netdev_safe(net, d, n)	\ | 
|  | 1714 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | 
|  | 1715 | #define for_each_netdev_continue(net, d)		\ | 
|  | 1716 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | 
| stephen hemminger | 254245d | 2009-11-10 07:54:47 +0000 | [diff] [blame] | 1717 | #define for_each_netdev_continue_rcu(net, d)		\ | 
|  | 1718 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | 
| nikolay@redhat.com | 8a7fbfa | 2013-03-12 02:49:01 +0000 | [diff] [blame] | 1719 | #define for_each_netdev_in_bond_rcu(bond, slave)	\ | 
|  | 1720 | for_each_netdev_rcu(&init_net, slave)	\ | 
|  | 1721 | if (netdev_master_upper_dev_get_rcu(slave) == bond) | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1722 | #define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list) | 
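/*
 * Illustrative sketch (editor's note): walking the per-namespace device list.
 * The plain for_each_netdev() form expects the RTNL (or dev_base_lock) to be
 * held; the _rcu form may be used inside an RCU read-side critical section.
 * rtnl_lock()/rtnl_unlock() are assumed to come from <linux/rtnetlink.h>.
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	for_each_netdev(&init_net, dev)
 *		pr_info("%s: mtu %u\n", dev->name, dev->mtu);
 *	rtnl_unlock();
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("%s\n", dev->name);
 *	rcu_read_unlock();
 */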
|  | 1723 |  | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 1724 | static inline struct net_device *next_net_device(struct net_device *dev) | 
|  | 1725 | { | 
|  | 1726 | struct list_head *lh; | 
|  | 1727 | struct net *net; | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1728 |  | 
| YOSHIFUJI Hideaki | c346dca | 2008-03-25 21:47:49 +0900 | [diff] [blame] | 1729 | net = dev_net(dev); | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 1730 | lh = dev->dev_list.next; | 
|  | 1731 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 
|  | 1732 | } | 
|  | 1733 |  | 
| Eric Dumazet | ce81b76 | 2009-11-11 17:34:30 +0000 | [diff] [blame] | 1734 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) | 
|  | 1735 | { | 
|  | 1736 | struct list_head *lh; | 
|  | 1737 | struct net *net; | 
|  | 1738 |  | 
|  | 1739 | net = dev_net(dev); | 
| Eric Dumazet | ccf4343 | 2011-01-26 18:08:02 +0000 | [diff] [blame] | 1740 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); | 
| Eric Dumazet | ce81b76 | 2009-11-11 17:34:30 +0000 | [diff] [blame] | 1741 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 
|  | 1742 | } | 
|  | 1743 |  | 
| Daniel Lezcano | a050c33 | 2007-09-12 14:57:09 +0200 | [diff] [blame] | 1744 | static inline struct net_device *first_net_device(struct net *net) | 
|  | 1745 | { | 
|  | 1746 | return list_empty(&net->dev_base_head) ? NULL : | 
|  | 1747 | net_device_entry(net->dev_base_head.next); | 
|  | 1748 | } | 
| Pavel Emelianov | 7562f87 | 2007-05-03 15:13:45 -0700 | [diff] [blame] | 1749 |  | 
| Eric Dumazet | ccf4343 | 2011-01-26 18:08:02 +0000 | [diff] [blame] | 1750 | static inline struct net_device *first_net_device_rcu(struct net *net) | 
|  | 1751 | { | 
|  | 1752 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | 
|  | 1753 |  | 
|  | 1754 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 
|  | 1755 | } | 
|  | 1756 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1757 | extern int 			netdev_boot_setup_check(struct net_device *dev); | 
|  | 1758 | extern unsigned long		netdev_boot_base(const char *prefix, int unit); | 
| Eric Dumazet | 941666c | 2010-12-05 01:23:53 +0000 | [diff] [blame] | 1759 | extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | 
|  | 1760 | const char *hwaddr); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1761 | extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | 
|  | 1762 | extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | extern void		dev_add_pack(struct packet_type *pt); | 
|  | 1764 | extern void		dev_remove_pack(struct packet_type *pt); | 
|  | 1765 | extern void		__dev_remove_pack(struct packet_type *pt); | 
| Vlad Yasevich | 62532da | 2012-11-15 08:49:10 +0000 | [diff] [blame] | 1766 | extern void		dev_add_offload(struct packet_offload *po); | 
|  | 1767 | extern void		dev_remove_offload(struct packet_offload *po); | 
|  | 1768 | extern void		__dev_remove_offload(struct packet_offload *po); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1769 |  | 
| Eric Dumazet | bb69ae0 | 2010-06-07 11:42:13 +0000 | [diff] [blame] | 1770 | extern struct net_device	*dev_get_by_flags_rcu(struct net *net, unsigned short flags, | 
|  | 1771 | unsigned short mask); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1772 | extern struct net_device	*dev_get_by_name(struct net *net, const char *name); | 
| Eric Dumazet | 72c9528 | 2009-10-30 07:11:27 +0000 | [diff] [blame] | 1773 | extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1774 | extern struct net_device	*__dev_get_by_name(struct net *net, const char *name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1775 | extern int		dev_alloc_name(struct net_device *dev, const char *name); | 
|  | 1776 | extern int		dev_open(struct net_device *dev); | 
|  | 1777 | extern int		dev_close(struct net_device *dev); | 
| Ben Hutchings | 0187bdf | 2008-06-19 16:15:47 -0700 | [diff] [blame] | 1778 | extern void		dev_disable_lro(struct net_device *dev); | 
| Michel Machado | 95603e2 | 2012-06-12 10:16:35 +0000 | [diff] [blame] | 1779 | extern int		dev_loopback_xmit(struct sk_buff *newskb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1780 | extern int		dev_queue_xmit(struct sk_buff *skb); | 
|  | 1781 | extern int		register_netdevice(struct net_device *dev); | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 1782 | extern void		unregister_netdevice_queue(struct net_device *dev, | 
|  | 1783 | struct list_head *head); | 
| Eric Dumazet | 9b5e383 | 2009-10-27 07:04:19 +0000 | [diff] [blame] | 1784 | extern void		unregister_netdevice_many(struct list_head *head); | 
| Eric Dumazet | 44a0873 | 2009-10-27 07:03:04 +0000 | [diff] [blame] | 1785 | static inline void unregister_netdevice(struct net_device *dev) | 
|  | 1786 | { | 
|  | 1787 | unregister_netdevice_queue(dev, NULL); | 
|  | 1788 | } | 
|  | 1789 |  | 
| Eric Dumazet | 29b4433 | 2010-10-11 10:22:12 +0000 | [diff] [blame] | 1790 | extern int 		netdev_refcnt_read(const struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | extern void		free_netdev(struct net_device *dev); | 
|  | 1792 | extern void		synchronize_net(void); | 
| Benjamin Herrenschmidt | 937f1ba | 2009-01-14 21:05:05 -0800 | [diff] [blame] | 1793 | extern int		init_dummy_netdev(struct net_device *dev); | 
|  | 1794 |  | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 1795 | extern struct net_device	*dev_get_by_index(struct net *net, int ifindex); | 
|  | 1796 | extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex); | 
| Eric Dumazet | fb699dfd | 2009-10-19 19:18:49 +0000 | [diff] [blame] | 1797 | extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex); | 
| Nicolas Schichan | 5dbe7c1 | 2013-06-26 17:23:42 +0200 | [diff] [blame] | 1798 | extern int		netdev_get_name(struct net *net, char *name, int ifindex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | extern int		dev_restart(struct net_device *dev); | 
|  | 1800 | #ifdef CONFIG_NETPOLL_TRAP | 
|  | 1801 | extern int		netpoll_trap(void); | 
|  | 1802 | #endif | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1803 | extern int	       skb_gro_receive(struct sk_buff **head, | 
|  | 1804 | struct sk_buff *skb); | 
|  | 1805 |  | 
|  | 1806 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | 
|  | 1807 | { | 
|  | 1808 | return NAPI_GRO_CB(skb)->data_offset; | 
|  | 1809 | } | 
|  | 1810 |  | 
|  | 1811 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | 
|  | 1812 | { | 
|  | 1813 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | 
|  | 1814 | } | 
|  | 1815 |  | 
|  | 1816 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | 
|  | 1817 | { | 
|  | 1818 | NAPI_GRO_CB(skb)->data_offset += len; | 
|  | 1819 | } | 
|  | 1820 |  | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 1821 | static inline void *skb_gro_header_fast(struct sk_buff *skb, | 
|  | 1822 | unsigned int offset) | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1823 | { | 
| Herbert Xu | 78a478d | 2009-05-26 18:50:21 +0000 | [diff] [blame] | 1824 | return NAPI_GRO_CB(skb)->frag0 + offset; | 
| Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 1825 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 |  | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 1827 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) | 
|  | 1828 | { | 
|  | 1829 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | 
|  | 1830 | } | 
|  | 1831 |  | 
|  | 1832 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, | 
|  | 1833 | unsigned int offset) | 
|  | 1834 | { | 
| Herbert Xu | 17dd759 | 2011-07-27 06:16:28 -0700 | [diff] [blame] | 1835 | if (!pskb_may_pull(skb, hlen)) | 
|  | 1836 | return NULL; | 
|  | 1837 |  | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 1838 | NAPI_GRO_CB(skb)->frag0 = NULL; | 
|  | 1839 | NAPI_GRO_CB(skb)->frag0_len = 0; | 
| Herbert Xu | 17dd759 | 2011-07-27 06:16:28 -0700 | [diff] [blame] | 1840 | return skb->data + offset; | 
| Herbert Xu | a5b1cf2 | 2009-05-26 18:50:28 +0000 | [diff] [blame] | 1841 | } | 
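/*
 * Illustrative sketch (editor's note): the usual header-access pattern in a
 * gro_receive callback.  skb_gro_header_fast() is tried first; if the
 * requested length is not available in frag0, skb_gro_header_slow() falls
 * back to pskb_may_pull().  "struct example_hdr" is a hypothetical header
 * type.
 *
 *	unsigned int off  = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(struct example_hdr);
 *	struct example_hdr *h = skb_gro_header_fast(skb, off);
 *
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		h = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!h))
 *			goto out;
 *	}
 */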
|  | 1842 |  | 
| Herbert Xu | aa4b9f5 | 2009-02-08 18:00:37 +0000 | [diff] [blame] | 1843 | static inline void *skb_gro_mac_header(struct sk_buff *skb) | 
|  | 1844 | { | 
| Herbert Xu | 78d3fd0 | 2009-05-26 18:50:23 +0000 | [diff] [blame] | 1845 | return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); | 
| Herbert Xu | aa4b9f5 | 2009-02-08 18:00:37 +0000 | [diff] [blame] | 1846 | } | 
|  | 1847 |  | 
| Herbert Xu | 36e7b1b | 2009-04-27 05:44:45 -0700 | [diff] [blame] | 1848 | static inline void *skb_gro_network_header(struct sk_buff *skb) | 
|  | 1849 | { | 
| Herbert Xu | 78d3fd0 | 2009-05-26 18:50:23 +0000 | [diff] [blame] | 1850 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + | 
|  | 1851 | skb_network_offset(skb); | 
| Herbert Xu | 36e7b1b | 2009-04-27 05:44:45 -0700 | [diff] [blame] | 1852 | } | 
|  | 1853 |  | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1854 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, | 
|  | 1855 | unsigned short type, | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1856 | const void *daddr, const void *saddr, | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 1857 | unsigned int len) | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1858 | { | 
| Ursula Braun | f1ecfd5 | 2007-10-22 16:16:14 +0200 | [diff] [blame] | 1859 | if (!dev->header_ops || !dev->header_ops->create) | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1860 | return 0; | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1861 |  | 
|  | 1862 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | 
| Stephen Hemminger | 0c4e858 | 2007-10-09 01:36:32 -0700 | [diff] [blame] | 1863 | } | 
|  | 1864 |  | 
| Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 1865 | static inline int dev_parse_header(const struct sk_buff *skb, | 
|  | 1866 | unsigned char *haddr) | 
|  | 1867 | { | 
|  | 1868 | const struct net_device *dev = skb->dev; | 
|  | 1869 |  | 
| Patrick McHardy | 1b83336 | 2007-10-18 05:09:28 -0700 | [diff] [blame] | 1870 | if (!dev->header_ops || !dev->header_ops->parse) | 
| Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 1871 | return 0; | 
| Stephen Hemminger | 3b04ddd | 2007-10-09 01:40:57 -0700 | [diff] [blame] | 1872 | return dev->header_ops->parse(skb, haddr); | 
| Stephen Hemminger | b95cce3 | 2007-09-26 22:13:38 -0700 | [diff] [blame] | 1873 | } | 
|  | 1874 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1875 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); | 
|  | 1876 | extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf); | 
|  | 1877 | static inline int unregister_gifconf(unsigned int family) | 
|  | 1878 | { | 
|  | 1879 | return register_gifconf(family, NULL); | 
|  | 1880 | } | 
|  | 1881 |  | 
| Willem de Bruijn | 99bbc70 | 2013-05-20 04:02:32 +0000 | [diff] [blame] | 1882 | #ifdef CONFIG_NET_FLOW_LIMIT | 
| Willem de Bruijn | 5f121b9 | 2013-06-13 15:29:38 -0400 | [diff] [blame] | 1883 | #define FLOW_LIMIT_HISTORY	(1 << 7)  /* must be a power of 2 and small enough not to overflow the u8 buckets */ | 
| Willem de Bruijn | 99bbc70 | 2013-05-20 04:02:32 +0000 | [diff] [blame] | 1884 | struct sd_flow_limit { | 
|  | 1885 | u64			count; | 
|  | 1886 | unsigned int		num_buckets; | 
|  | 1887 | unsigned int		history_head; | 
|  | 1888 | u16			history[FLOW_LIMIT_HISTORY]; | 
|  | 1889 | u8			buckets[]; | 
|  | 1890 | }; | 
|  | 1891 |  | 
|  | 1892 | extern int netdev_flow_limit_table_len; | 
|  | 1893 | #endif /* CONFIG_NET_FLOW_LIMIT */ | 
|  | 1894 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1895 | /* | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 1896 | * Incoming packets are placed on per-cpu queues | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | */ | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 1898 | struct softnet_data { | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1899 | struct Qdisc		*output_queue; | 
| Changli Gao | a9cbd58 | 2010-04-26 23:06:24 +0000 | [diff] [blame] | 1900 | struct Qdisc		**output_queue_tailp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1901 | struct list_head	poll_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | struct sk_buff		*completion_queue; | 
| Changli Gao | 6e7676c | 2010-04-27 15:07:33 -0700 | [diff] [blame] | 1903 | struct sk_buff_head	process_queue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1904 |  | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 1905 | /* stats */ | 
| David S. Miller | cd7b539 | 2010-05-02 22:27:59 -0700 | [diff] [blame] | 1906 | unsigned int		processed; | 
|  | 1907 | unsigned int		time_squeeze; | 
|  | 1908 | unsigned int		cpu_collision; | 
|  | 1909 | unsigned int		received_rps; | 
| Changli Gao | dee4287 | 2010-05-02 05:42:16 +0000 | [diff] [blame] | 1910 |  | 
| Changli Gao | fd793d8 | 2010-04-15 00:16:59 -0700 | [diff] [blame] | 1911 | #ifdef CONFIG_RPS | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 1912 | struct softnet_data	*rps_ipi_list; | 
|  | 1913 |  | 
|  | 1914 | /* Elements below can be accessed between CPUs for RPS */ | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1915 | struct call_single_data	csd ____cacheline_aligned_in_smp; | 
| Eric Dumazet | 8875127 | 2010-04-19 05:07:33 +0000 | [diff] [blame] | 1916 | struct softnet_data	*rps_ipi_next; | 
|  | 1917 | unsigned int		cpu; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 1918 | unsigned int		input_queue_head; | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 1919 | unsigned int		input_queue_tail; | 
| Tom Herbert | 1e94d72 | 2010-03-18 17:45:44 -0700 | [diff] [blame] | 1920 | #endif | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 1921 | unsigned int		dropped; | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1922 | struct sk_buff_head	input_pkt_queue; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1923 | struct napi_struct	backlog; | 
| Willem de Bruijn | 99bbc70 | 2013-05-20 04:02:32 +0000 | [diff] [blame] | 1924 |  | 
|  | 1925 | #ifdef CONFIG_NET_FLOW_LIMIT | 
| Willem de Bruijn | 5f121b9 | 2013-06-13 15:29:38 -0400 | [diff] [blame] | 1926 | struct sd_flow_limit __rcu *flow_limit; | 
| Willem de Bruijn | 99bbc70 | 2013-05-20 04:02:32 +0000 | [diff] [blame] | 1927 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1928 | }; | 
|  | 1929 |  | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 1930 | static inline void input_queue_head_incr(struct softnet_data *sd) | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 1931 | { | 
|  | 1932 | #ifdef CONFIG_RPS | 
| Tom Herbert | 76cc8b1 | 2010-05-20 18:37:59 +0000 | [diff] [blame] | 1933 | sd->input_queue_head++; | 
|  | 1934 | #endif | 
|  | 1935 | } | 
|  | 1936 |  | 
|  | 1937 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | 
|  | 1938 | unsigned int *qtail) | 
|  | 1939 | { | 
|  | 1940 | #ifdef CONFIG_RPS | 
|  | 1941 | *qtail = ++sd->input_queue_tail; | 
| Tom Herbert | fec5e65 | 2010-04-16 16:01:27 -0700 | [diff] [blame] | 1942 | #endif | 
|  | 1943 | } | 
|  | 1944 |  | 
| Tom Herbert | 0a9627f | 2010-03-16 08:03:29 +0000 | [diff] [blame] | 1945 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1946 |  | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1947 | extern void __netif_schedule(struct Qdisc *q); | 
| David S. Miller | 86d804e | 2008-07-08 23:11:25 -0700 | [diff] [blame] | 1948 |  | 
|  | 1949 | static inline void netif_schedule_queue(struct netdev_queue *txq) | 
|  | 1950 | { | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 1951 | if (!(txq->state & QUEUE_STATE_ANY_XOFF)) | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1952 | __netif_schedule(txq->qdisc); | 
| David S. Miller | 86d804e | 2008-07-08 23:11:25 -0700 | [diff] [blame] | 1953 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1954 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1955 | static inline void netif_tx_schedule_all(struct net_device *dev) | 
|  | 1956 | { | 
|  | 1957 | unsigned int i; | 
|  | 1958 |  | 
|  | 1959 | for (i = 0; i < dev->num_tx_queues; i++) | 
|  | 1960 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | 
|  | 1961 | } | 
|  | 1962 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1963 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) | 
|  | 1964 | { | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 1965 | clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 1966 | } | 
|  | 1967 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1968 | /** | 
|  | 1969 | *	netif_start_queue - allow transmit | 
|  | 1970 | *	@dev: network device | 
|  | 1971 | * | 
|  | 1972 | *	Allow upper layers to call the device hard_start_xmit routine. | 
|  | 1973 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1974 | static inline void netif_start_queue(struct net_device *dev) | 
|  | 1975 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 1976 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | } | 
|  | 1978 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 1979 | static inline void netif_tx_start_all_queues(struct net_device *dev) | 
|  | 1980 | { | 
|  | 1981 | unsigned int i; | 
|  | 1982 |  | 
|  | 1983 | for (i = 0; i < dev->num_tx_queues; i++) { | 
|  | 1984 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
|  | 1985 | netif_tx_start_queue(txq); | 
|  | 1986 | } | 
|  | 1987 | } | 
|  | 1988 |  | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1989 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | { | 
|  | 1991 | #ifdef CONFIG_NETPOLL_TRAP | 
| Sergei Shtylyov | 5f286e1 | 2007-04-28 20:57:37 -0700 | [diff] [blame] | 1992 | if (netpoll_trap()) { | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 1993 | netif_tx_start_queue(dev_queue); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1994 | return; | 
| Sergei Shtylyov | 5f286e1 | 2007-04-28 20:57:37 -0700 | [diff] [blame] | 1995 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1996 | #endif | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 1997 | if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 1998 | __netif_schedule(dev_queue->qdisc); | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 1999 | } | 
|  | 2000 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 2001 | /** | 
|  | 2002 | *	netif_wake_queue - restart transmit | 
|  | 2003 | *	@dev: network device | 
|  | 2004 | * | 
|  | 2005 | *	Allow upper layers to call the device hard_start_xmit routine. | 
|  | 2006 | *	Used for flow control when transmit resources are available. | 
|  | 2007 | */ | 
| David S. Miller | 79d1638 | 2008-07-08 23:14:46 -0700 | [diff] [blame] | 2008 | static inline void netif_wake_queue(struct net_device *dev) | 
|  | 2009 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2010 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2011 | } | 
|  | 2012 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2013 | static inline void netif_tx_wake_all_queues(struct net_device *dev) | 
|  | 2014 | { | 
|  | 2015 | unsigned int i; | 
|  | 2016 |  | 
|  | 2017 | for (i = 0; i < dev->num_tx_queues; i++) { | 
|  | 2018 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
|  | 2019 | netif_tx_wake_queue(txq); | 
|  | 2020 | } | 
|  | 2021 | } | 
|  | 2022 |  | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 2023 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | 
|  | 2024 | { | 
| Guillaume Chazarain | 18543a6 | 2010-11-06 06:39:32 +0000 | [diff] [blame] | 2025 | if (WARN_ON(!dev_queue)) { | 
| Joe Perches | 256ee43 | 2011-03-01 07:06:12 +0000 | [diff] [blame] | 2026 | pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); | 
| Guillaume Chazarain | 18543a6 | 2010-11-06 06:39:32 +0000 | [diff] [blame] | 2027 | return; | 
|  | 2028 | } | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 2029 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 2030 | } | 
|  | 2031 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2032 | /** | 
|  | 2033 | *	netif_stop_queue - stop the transmit queue | 
|  | 2034 | *	@dev: network device | 
|  | 2035 | * | 
|  | 2036 | *	Stop upper layers calling the device hard_start_xmit routine. | 
|  | 2037 | *	Used for flow control when transmit resources are unavailable. | 
|  | 2038 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | static inline void netif_stop_queue(struct net_device *dev) | 
|  | 2040 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2041 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | } | 
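/*
 * Illustrative sketch (editor's note): the usual driver flow-control pattern
 * built on netif_stop_queue()/netif_wake_queue().  ring_free(), priv and
 * WAKE_THRESHOLD are hypothetical driver-specific names.
 *
 *	In ndo_start_xmit(), after queueing the skb to the hardware ring:
 *
 *		if (unlikely(ring_free(priv) < MAX_SKB_FRAGS + 1))
 *			netif_stop_queue(dev);
 *
 *	In the TX completion handler, once descriptors have been reclaimed:
 *
 *		if (netif_queue_stopped(dev) && ring_free(priv) > WAKE_THRESHOLD)
 *			netif_wake_queue(dev);
 */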
|  | 2043 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2044 | static inline void netif_tx_stop_all_queues(struct net_device *dev) | 
|  | 2045 | { | 
|  | 2046 | unsigned int i; | 
|  | 2047 |  | 
|  | 2048 | for (i = 0; i < dev->num_tx_queues; i++) { | 
|  | 2049 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
|  | 2050 | netif_tx_stop_queue(txq); | 
|  | 2051 | } | 
|  | 2052 | } | 
|  | 2053 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2054 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 2055 | { | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 2056 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); | 
| Dave Jones | d29f749 | 2008-07-22 14:09:06 -0700 | [diff] [blame] | 2057 | } | 
|  | 2058 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2059 | /** | 
|  | 2060 | *	netif_queue_stopped - test if transmit queue is flow-blocked | 
|  | 2061 | *	@dev: network device | 
|  | 2062 | * | 
|  | 2063 | *	Test if transmit queue on device is currently unable to send. | 
|  | 2064 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2065 | static inline bool netif_queue_stopped(const struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2067 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2068 | } | 
|  | 2069 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2070 | static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2071 | { | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 2072 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; | 
|  | 2073 | } | 
|  | 2074 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2075 | static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 2076 | { | 
|  | 2077 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; | 
|  | 2078 | } | 
|  | 2079 |  | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2080 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, | 
|  | 2081 | unsigned int bytes) | 
|  | 2082 | { | 
| Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 2083 | #ifdef CONFIG_BQL | 
|  | 2084 | dql_queued(&dev_queue->dql, bytes); | 
| Alexander Duyck | b37c0fb | 2012-02-07 02:29:06 +0000 | [diff] [blame] | 2085 |  | 
|  | 2086 | if (likely(dql_avail(&dev_queue->dql) >= 0)) | 
|  | 2087 | return; | 
|  | 2088 |  | 
|  | 2089 | set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | 
|  | 2090 |  | 
|  | 2091 | /* | 
|  | 2092 | * The XOFF flag must be set before checking the dql_avail below, | 
|  | 2093 | * because in netdev_tx_completed_queue we update the dql_completed | 
|  | 2094 | * before checking the XOFF flag. | 
|  | 2095 | */ | 
|  | 2096 | smp_mb(); | 
|  | 2097 |  | 
|  | 2098 | /* check again in case another CPU has just made room avail */ | 
|  | 2099 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) | 
|  | 2100 | clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | 
| Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 2101 | #endif | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2102 | } | 
|  | 2103 |  | 
| Florian Fainelli | 0042d0c | 2013-09-06 16:58:00 +0100 | [diff] [blame] | 2104 | /** | 
|  | 2105 | * 	netdev_sent_queue - report the number of bytes queued to hardware | 
|  | 2106 | * 	@dev: network device | 
|  | 2107 | * 	@bytes: number of bytes queued to the hardware device queue | 
|  | 2108 | * | 
|  | 2109 | * 	Report the number of bytes queued for sending/completion to the network | 
|  | 2110 | * 	device hardware queue. @bytes should be a good approximation and must | 
|  | 2111 | * 	exactly match the @bytes later reported via netdev_completed_queue(). | 
|  | 2112 | */ | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2113 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) | 
|  | 2114 | { | 
|  | 2115 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); | 
|  | 2116 | } | 
|  | 2117 |  | 
|  | 2118 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 2119 | unsigned int pkts, unsigned int bytes) | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2120 | { | 
| Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 2121 | #ifdef CONFIG_BQL | 
| Alexander Duyck | b37c0fb | 2012-02-07 02:29:06 +0000 | [diff] [blame] | 2122 | if (unlikely(!bytes)) | 
|  | 2123 | return; | 
|  | 2124 |  | 
|  | 2125 | dql_completed(&dev_queue->dql, bytes); | 
|  | 2126 |  | 
|  | 2127 | /* | 
|  | 2128 | * Without the memory barrier there is a small possibility that | 
|  | 2129 | * netdev_tx_sent_queue will miss the update and cause the queue to | 
|  | 2130 | * be stopped forever | 
|  | 2131 | */ | 
|  | 2132 | smp_mb(); | 
|  | 2133 |  | 
|  | 2134 | if (dql_avail(&dev_queue->dql) < 0) | 
|  | 2135 | return; | 
|  | 2136 |  | 
|  | 2137 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) | 
|  | 2138 | netif_schedule_queue(dev_queue); | 
| Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 2139 | #endif | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2140 | } | 
|  | 2141 |  | 
| Florian Fainelli | 0042d0c | 2013-09-06 16:58:00 +0100 | [diff] [blame] | 2142 | /** | 
|  | 2143 | * 	netdev_completed_queue - report bytes and packets completed by device | 
|  | 2144 | * 	@dev: network device | 
|  | 2145 | * 	@pkts: actual number of packets sent over the medium | 
|  | 2146 | * 	@bytes: actual number of bytes sent over the medium | 
|  | 2147 | * | 
|  | 2148 | * 	Report the number of bytes and packets transmitted by the network device | 
|  | 2149 | * 	hardware queue over the physical medium. @bytes must exactly match the | 
|  | 2150 | * 	@bytes amount passed to netdev_sent_queue(). | 
|  | 2151 | */ | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2152 | static inline void netdev_completed_queue(struct net_device *dev, | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 2153 | unsigned int pkts, unsigned int bytes) | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2154 | { | 
|  | 2155 | netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); | 
|  | 2156 | } | 
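/*
 * Illustrative sketch (editor's note): how a driver typically feeds BQL.
 * Bytes are reported to netdev_tx_sent_queue() when descriptors are posted
 * and to netdev_tx_completed_queue() when they are reclaimed; the two byte
 * counts must balance.  txq, pkts_compl and bytes_compl are hypothetical
 * driver-local names.
 *
 *	In ndo_start_xmit(), after posting the descriptor:
 *
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	In the TX completion path, after reclaiming a batch of descriptors:
 *
 *		netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 *
 *	When the ring is flushed (e.g. on device reset):
 *
 *		netdev_tx_reset_queue(txq);
 */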
|  | 2157 |  | 
|  | 2158 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) | 
|  | 2159 | { | 
| Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 2160 | #ifdef CONFIG_BQL | 
| Alexander Duyck | 5c49035 | 2012-02-07 02:29:01 +0000 | [diff] [blame] | 2161 | clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); | 
| Tom Herbert | 114cf58 | 2011-11-28 16:33:09 +0000 | [diff] [blame] | 2162 | dql_reset(&q->dql); | 
|  | 2163 | #endif | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2164 | } | 
|  | 2165 |  | 
| Florian Fainelli | 0042d0c | 2013-09-06 16:58:00 +0100 | [diff] [blame] | 2166 | /** | 
|  | 2167 | * 	netdev_reset_queue - reset the packet and byte counts of a network device | 
|  | 2168 | * 	@dev_queue: network device | 
|  | 2169 | * | 
|  | 2170 | * 	Reset the byte and packet counts of a network device and clear the | 
|  | 2171 | * 	software flow control XOFF bit for its queue. | 
|  | 2172 | */ | 
| Tom Herbert | c5d67bd | 2011-11-28 16:32:52 +0000 | [diff] [blame] | 2173 | static inline void netdev_reset_queue(struct net_device *dev_queue) | 
|  | 2174 | { | 
|  | 2175 | netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2176 | } | 
|  | 2177 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2178 | /** | 
|  | 2179 | *	netif_running - test if up | 
|  | 2180 | *	@dev: network device | 
|  | 2181 | * | 
|  | 2182 | *	Test if the device has been brought up. | 
|  | 2183 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2184 | static inline bool netif_running(const struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | { | 
|  | 2186 | return test_bit(__LINK_STATE_START, &dev->state); | 
|  | 2187 | } | 
|  | 2188 |  | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2189 | /* | 
|  | 2190 | * Routines to manage the subqueues on a device.  We only need start | 
|  | 2191 | * stop, and a check if it's stopped.  All other device management is | 
|  | 2192 | * done at the overall netdevice level. | 
|  | 2193 | * Also test the device if we're multiqueue. | 
|  | 2194 | */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2195 |  | 
|  | 2196 | /** | 
|  | 2197 | *	netif_start_subqueue - allow sending packets on subqueue | 
|  | 2198 | *	@dev: network device | 
|  | 2199 | *	@queue_index: sub queue index | 
|  | 2200 | * | 
|  | 2201 | * Start individual transmit queue of a device with multiple transmit queues. | 
|  | 2202 | */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2203 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) | 
|  | 2204 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2205 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 2206 |  | 
|  | 2207 | netif_tx_start_queue(txq); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2208 | } | 
|  | 2209 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2210 | /** | 
|  | 2211 | *	netif_stop_subqueue - stop sending packets on subqueue | 
|  | 2212 | *	@dev: network device | 
|  | 2213 | *	@queue_index: sub queue index | 
|  | 2214 | * | 
|  | 2215 | * Stop individual transmit queue of a device with multiple transmit queues. | 
|  | 2216 | */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2217 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) | 
|  | 2218 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2219 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2220 | #ifdef CONFIG_NETPOLL_TRAP | 
|  | 2221 | if (netpoll_trap()) | 
|  | 2222 | return; | 
|  | 2223 | #endif | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 2224 | netif_tx_stop_queue(txq); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2225 | } | 
|  | 2226 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2227 | /** | 
|  | 2228 | *	netif_subqueue_stopped - test status of subqueue | 
|  | 2229 | *	@dev: network device | 
|  | 2230 | *	@queue_index: sub queue index | 
|  | 2231 | * | 
|  | 2232 | * Check individual transmit queue of a device with multiple transmit queues. | 
|  | 2233 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2234 | static inline bool __netif_subqueue_stopped(const struct net_device *dev, | 
|  | 2235 | u16 queue_index) | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2236 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2237 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 2238 |  | 
|  | 2239 | return netif_tx_queue_stopped(txq); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2240 | } | 
|  | 2241 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2242 | static inline bool netif_subqueue_stopped(const struct net_device *dev, | 
|  | 2243 | struct sk_buff *skb) | 
| Pavel Emelyanov | 668f895 | 2007-10-21 17:01:56 -0700 | [diff] [blame] | 2244 | { | 
|  | 2245 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | 
|  | 2246 | } | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2247 |  | 
|  | 2248 | /** | 
|  | 2249 | *	netif_wake_subqueue - allow sending packets on subqueue | 
|  | 2250 | *	@dev: network device | 
|  | 2251 | *	@queue_index: sub queue index | 
|  | 2252 | * | 
|  | 2253 | * Resume individual transmit queue of a device with multiple transmit queues. | 
|  | 2254 | */ | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2255 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | 
|  | 2256 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2257 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2258 | #ifdef CONFIG_NETPOLL_TRAP | 
|  | 2259 | if (netpoll_trap()) | 
|  | 2260 | return; | 
|  | 2261 | #endif | 
| Tom Herbert | 73466498 | 2011-11-28 16:32:44 +0000 | [diff] [blame] | 2262 | if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) | 
| David S. Miller | 37437bb | 2008-07-16 02:15:04 -0700 | [diff] [blame] | 2263 | __netif_schedule(txq->qdisc); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2264 | } | 
|  | 2265 |  | 
| Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 2266 | #ifdef CONFIG_XPS | 
| Alexander Duyck | 537c00d | 2013-01-10 08:57:02 +0000 | [diff] [blame] | 2267 | extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, | 
|  | 2268 | u16 index); | 
|  | 2269 | #else | 
|  | 2270 | static inline int netif_set_xps_queue(struct net_device *dev, | 
|  | 2271 | struct cpumask *mask, | 
|  | 2272 | u16 index) | 
|  | 2273 | { | 
|  | 2274 | return 0; | 
|  | 2275 | } | 
|  | 2276 | #endif | 
|  | 2277 |  | 
| Vladislav Zolotarov | a3d22a6 | 2010-12-13 06:27:10 +0000 | [diff] [blame] | 2278 | /* | 
|  | 2279 | * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used | 
|  | 2280 | * as a distribution range limit for the returned value. | 
|  | 2281 | */ | 
|  | 2282 | static inline u16 skb_tx_hash(const struct net_device *dev, | 
|  | 2283 | const struct sk_buff *skb) | 
|  | 2284 | { | 
|  | 2285 | return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); | 
|  | 2286 | } | 
|  | 2287 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2288 | /** | 
|  | 2289 | *	netif_is_multiqueue - test if device has multiple transmit queues | 
|  | 2290 | *	@dev: network device | 
|  | 2291 | * | 
|  | 2292 | * Check if device has multiple transmit queues | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2293 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2294 | static inline bool netif_is_multiqueue(const struct net_device *dev) | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2295 | { | 
| Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 2296 | return dev->num_tx_queues > 1; | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2297 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 |  | 
| Tom Herbert | e648493 | 2010-10-18 18:04:39 +0000 | [diff] [blame] | 2299 | extern int netif_set_real_num_tx_queues(struct net_device *dev, | 
|  | 2300 | unsigned int txq); | 
| John Fastabend | f0796d5 | 2010-07-01 13:21:57 +0000 | [diff] [blame] | 2301 |  | 
| Ben Hutchings | 62fe0b4 | 2010-09-27 08:24:33 +0000 | [diff] [blame] | 2302 | #ifdef CONFIG_RPS | 
|  | 2303 | extern int netif_set_real_num_rx_queues(struct net_device *dev, | 
|  | 2304 | unsigned int rxq); | 
|  | 2305 | #else | 
|  | 2306 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | 
|  | 2307 | unsigned int rxq) | 
|  | 2308 | { | 
|  | 2309 | return 0; | 
|  | 2310 | } | 
|  | 2311 | #endif | 
|  | 2312 |  | 
| Ben Hutchings | 3171d02 | 2010-09-27 08:24:49 +0000 | [diff] [blame] | 2313 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, | 
|  | 2314 | const struct net_device *from_dev) | 
|  | 2315 | { | 
| Jiri Pirko | ee6ae1a | 2012-07-20 02:28:46 +0000 | [diff] [blame] | 2316 | int err; | 
|  | 2317 |  | 
|  | 2318 | err = netif_set_real_num_tx_queues(to_dev, | 
|  | 2319 | from_dev->real_num_tx_queues); | 
|  | 2320 | if (err) | 
|  | 2321 | return err; | 
| Ben Hutchings | 3171d02 | 2010-09-27 08:24:49 +0000 | [diff] [blame] | 2322 | #ifdef CONFIG_RPS | 
|  | 2323 | return netif_set_real_num_rx_queues(to_dev, | 
|  | 2324 | from_dev->real_num_rx_queues); | 
|  | 2325 | #else | 
|  | 2326 | return 0; | 
|  | 2327 | #endif | 
|  | 2328 | } | 
|  | 2329 |  | 
| Yuval Mintz | 16917b8 | 2012-07-01 03:18:50 +0000 | [diff] [blame] | 2330 | #define DEFAULT_MAX_NUM_RSS_QUEUES	(8) | 
|  | 2331 | extern int netif_get_num_default_rss_queues(void); | 
|  | 2332 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2333 | /* Use this variant when it is known for sure that it | 
| Matti Linnanvuori | 0ef4730 | 2008-03-28 16:33:00 -0700 | [diff] [blame] | 2334 | * is executing from hardware interrupt context or with hardware interrupts | 
|  | 2335 | * disabled. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | */ | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2337 | extern void dev_kfree_skb_irq(struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2338 |  | 
|  | 2339 | /* Use this variant in places where it could be invoked | 
| Matti Linnanvuori | 0ef4730 | 2008-03-28 16:33:00 -0700 | [diff] [blame] | 2340 | * from either hardware interrupt or other context, with hardware interrupts | 
|  | 2341 | * either disabled or enabled. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2342 | */ | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 2343 | extern void dev_kfree_skb_any(struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2344 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2345 | extern int		netif_rx(struct sk_buff *skb); | 
|  | 2346 | extern int		netif_rx_ni(struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2347 | extern int		netif_receive_skb(struct sk_buff *skb); | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 2348 | extern gro_result_t	napi_gro_receive(struct napi_struct *napi, | 
| Herbert Xu | d565b0a | 2008-12-15 23:38:52 -0800 | [diff] [blame] | 2349 | struct sk_buff *skb); | 
| Eric Dumazet | 2e71a6f | 2012-10-06 08:08:49 +0000 | [diff] [blame] | 2350 | extern void		napi_gro_flush(struct napi_struct *napi, bool flush_old); | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 2351 | extern struct sk_buff *	napi_get_frags(struct napi_struct *napi); | 
| Ben Hutchings | c7c4b3b | 2009-10-29 21:36:53 -0700 | [diff] [blame] | 2352 | extern gro_result_t	napi_gro_frags(struct napi_struct *napi); | 
| Herbert Xu | 76620aa | 2009-04-16 02:02:07 -0700 | [diff] [blame] | 2353 |  | 
|  | 2354 | static inline void napi_free_frags(struct napi_struct *napi) | 
|  | 2355 | { | 
|  | 2356 | kfree_skb(napi->skb); | 
|  | 2357 | napi->skb = NULL; | 
|  | 2358 | } | 
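/*
 * Illustrative sketch (editor's note): feeding received buffers into GRO from
 * a driver's NAPI poll routine.  build_example_skb() is a hypothetical helper
 * that wraps a completed RX descriptor in an skb; eth_type_trans() is assumed
 * to come from <linux/etherdevice.h>.
 *
 *	struct sk_buff *skb = build_example_skb(priv, desc);
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	napi_gro_receive(napi, skb);
 */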
|  | 2359 |  | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 2360 | extern int netdev_rx_handler_register(struct net_device *dev, | 
| Jiri Pirko | 93e2c32 | 2010-06-10 03:34:59 +0000 | [diff] [blame] | 2361 | rx_handler_func_t *rx_handler, | 
|  | 2362 | void *rx_handler_data); | 
| Jiri Pirko | ab95bfe | 2010-06-01 21:52:08 +0000 | [diff] [blame] | 2363 | extern void netdev_rx_handler_unregister(struct net_device *dev); | 
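/*
 * Illustrative sketch (editor's note): attaching an rx_handler to a device,
 * as bridge/bonding/macvlan do.  Registration must be done under the RTNL.
 * example_handle_frame(), example_wants_frame(), example_consume() and priv
 * are hypothetical names; rx_handler_result_t and RX_HANDLER_* are defined
 * earlier in this header.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!example_wants_frame(skb))
 *			return RX_HANDLER_PASS;
 *		example_consume(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, example_handle_frame, priv);
 *	rtnl_unlock();
 */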
|  | 2364 |  | 
| David S. Miller | 95f050b | 2012-03-06 16:12:15 -0500 | [diff] [blame] | 2365 | extern bool		dev_valid_name(const char *name); | 
| Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 2366 | extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *); | 
|  | 2367 | extern int		dev_ethtool(struct net *net, struct ifreq *); | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 2368 | extern unsigned int	dev_get_flags(const struct net_device *); | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 2369 | extern int		__dev_change_flags(struct net_device *, unsigned int flags); | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 2370 | extern int		dev_change_flags(struct net_device *, unsigned int); | 
| Patrick McHardy | bd38081 | 2010-02-26 06:34:53 +0000 | [diff] [blame] | 2371 | extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags); | 
| Stephen Hemminger | cf04a4c7 | 2008-09-30 02:22:14 -0700 | [diff] [blame] | 2372 | extern int		dev_change_name(struct net_device *, const char *); | 
| Stephen Hemminger | 0b815a1 | 2008-09-22 21:28:11 -0700 | [diff] [blame] | 2373 | extern int		dev_set_alias(struct net_device *, const char *, size_t); | 
| Eric W. Biederman | ce286d3 | 2007-09-12 13:53:49 +0200 | [diff] [blame] | 2374 | extern int		dev_change_net_namespace(struct net_device *, | 
|  | 2375 | struct net *, const char *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2376 | extern int		dev_set_mtu(struct net_device *, int); | 
| Vlad Dogaru | cbda10f | 2011-01-13 23:38:30 +0000 | [diff] [blame] | 2377 | extern void		dev_set_group(struct net_device *, int); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2378 | extern int		dev_set_mac_address(struct net_device *, | 
|  | 2379 | struct sockaddr *); | 
| Jiri Pirko | 4bf84c3 | 2012-12-27 23:49:37 +0000 | [diff] [blame] | 2380 | extern int		dev_change_carrier(struct net_device *, | 
|  | 2381 | bool new_carrier); | 
| Jiri Pirko | 66b52b0 | 2013-07-29 18:16:49 +0200 | [diff] [blame] | 2382 | extern int		dev_get_phys_port_id(struct net_device *dev, | 
|  | 2383 | struct netdev_phys_port_id *ppid); | 
| Herbert Xu | f6a78bf | 2006-06-22 02:57:17 -0700 | [diff] [blame] | 2384 | extern int		dev_hard_start_xmit(struct sk_buff *skb, | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2385 | struct net_device *dev, | 
|  | 2386 | struct netdev_queue *txq); | 
| Arnd Bergmann | 4454096 | 2009-11-26 06:07:08 +0000 | [diff] [blame] | 2387 | extern int		dev_forward_skb(struct net_device *dev, | 
|  | 2388 | struct sk_buff *skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 2390 | extern int		netdev_budget; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2391 |  | 
|  | 2392 | /* Called by rtnetlink.c:rtnl_unlock() */ | 
|  | 2393 | extern void netdev_run_todo(void); | 
|  | 2394 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2395 | /** | 
|  | 2396 | *	dev_put - release reference to device | 
|  | 2397 | *	@dev: network device | 
|  | 2398 | * | 
| Benjamin Thery | 9ef4429 | 2007-10-10 21:18:17 -0700 | [diff] [blame] | 2399 | * Release reference to device to allow it to be freed. | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2400 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2401 | static inline void dev_put(struct net_device *dev) | 
|  | 2402 | { | 
| Christoph Lameter | 933393f | 2011-12-22 11:58:51 -0600 | [diff] [blame] | 2403 | this_cpu_dec(*dev->pcpu_refcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2404 | } | 
|  | 2405 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2406 | /** | 
|  | 2407 | *	dev_hold - get reference to device | 
|  | 2408 | *	@dev: network device | 
|  | 2409 | * | 
| Benjamin Thery | 9ef4429 | 2007-10-10 21:18:17 -0700 | [diff] [blame] | 2410 | * Hold reference to device to keep it from being freed. | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2411 | */ | 
| Stephen Hemminger | 1533306 | 2006-03-20 22:32:28 -0800 | [diff] [blame] | 2412 | static inline void dev_hold(struct net_device *dev) | 
|  | 2413 | { | 
| Christoph Lameter | 933393f | 2011-12-22 11:58:51 -0600 | [diff] [blame] | 2414 | this_cpu_inc(*dev->pcpu_refcnt); | 
| Stephen Hemminger | 1533306 | 2006-03-20 22:32:28 -0800 | [diff] [blame] | 2415 | } | 
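/*
 * Illustrative sketch (editor's note): taking a long-lived reference on a
 * device found under RCU.  dev_get_by_index_rcu() (declared earlier in this
 * header) returns the device without taking a reference, so dev_hold() must
 * be called before leaving the RCU section if the pointer is to be kept.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		dev_hold(dev);
 *	rcu_read_unlock();
 *
 *	...
 *
 *	if (dev)
 *		dev_put(dev);
 */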
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2416 |  | 
|  | 2417 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | 
|  | 2418 | * and _off may be called from IRQ context, but it is the caller | 
|  | 2419 | * who is responsible for serializing these calls. | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2420 | * | 
|  | 2421 | * The name carrier is inappropriate; these functions should really be | 
|  | 2422 | * called netif_lowerlayer_*() because they represent the state of any | 
|  | 2423 | * kind of lower layer not just hardware media. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2424 | */ | 
|  | 2425 |  | 
| Ben Hutchings | 8f4cccb | 2012-08-20 22:16:51 +0100 | [diff] [blame] | 2426 | extern void linkwatch_init_dev(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2427 | extern void linkwatch_fire_event(struct net_device *dev); | 
| Eric Dumazet | e014deb | 2009-11-17 05:59:21 +0000 | [diff] [blame] | 2428 | extern void linkwatch_forget_dev(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2429 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2430 | /** | 
|  | 2431 | *	netif_carrier_ok - test if carrier present | 
|  | 2432 | *	@dev: network device | 
|  | 2433 | * | 
|  | 2434 | * Check if carrier is present on device | 
|  | 2435 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2436 | static inline bool netif_carrier_ok(const struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2437 | { | 
|  | 2438 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | 
|  | 2439 | } | 
|  | 2440 |  | 
| Eric Dumazet | 9d21493 | 2009-05-17 20:55:16 -0700 | [diff] [blame] | 2441 | extern unsigned long dev_trans_start(struct net_device *dev); | 
|  | 2442 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2443 | extern void __netdev_watchdog_up(struct net_device *dev); | 
|  | 2444 |  | 
| Denis Vlasenko | 0a242ef | 2005-08-11 15:32:53 -0700 | [diff] [blame] | 2445 | extern void netif_carrier_on(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2446 |  | 
| Denis Vlasenko | 0a242ef | 2005-08-11 15:32:53 -0700 | [diff] [blame] | 2447 | extern void netif_carrier_off(struct net_device *dev); | 
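/*
 * Illustrative sketch (editor's note): a driver's link-change interrupt or
 * polling routine usually ends up here.  example_link_up() stands in for
 * whatever link status the hardware reports.
 *
 *	if (example_link_up(priv))
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */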
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2448 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2449 | /** | 
|  | 2450 | *	netif_dormant_on - mark device as dormant. | 
|  | 2451 | *	@dev: network device | 
|  | 2452 | * | 
|  | 2453 | * Mark device as dormant (as per RFC2863). | 
|  | 2454 | * | 
|  | 2455 | * The dormant state indicates that the relevant interface is not | 
|  | 2456 | * actually in a condition to pass packets (i.e., it is not 'up') but is | 
|  | 2457 | * in a "pending" state, waiting for some external event.  For "on- | 
|  | 2458 | * demand" interfaces, this new state identifies the situation where the | 
|  | 2459 | * interface is waiting for events to place it in the up state. | 
|  | 2460 | * | 
|  | 2461 | */ | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2462 | static inline void netif_dormant_on(struct net_device *dev) | 
|  | 2463 | { | 
|  | 2464 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | 
|  | 2465 | linkwatch_fire_event(dev); | 
|  | 2466 | } | 
|  | 2467 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2468 | /** | 
|  | 2469 | *	netif_dormant_off - mark device as not dormant | 
|  | 2470 | *	@dev: network device | 
|  | 2471 | * | 
|  | 2472 | * Clear the dormant state (as per RFC2863) of the device. | 
|  | 2473 | */ | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2474 | static inline void netif_dormant_off(struct net_device *dev) | 
|  | 2475 | { | 
|  | 2476 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | 
|  | 2477 | linkwatch_fire_event(dev); | 
|  | 2478 | } | 
|  | 2479 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2480 | /** | 
|  | 2481 | *	netif_dormant - test if device is dormant | 
|  | 2482 | *	@dev: network device | 
|  | 2483 | * | 
|  | 2484 | * Check if the device is in the dormant state (as per RFC2863). | 
|  | 2485 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2486 | static inline bool netif_dormant(const struct net_device *dev) | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2487 | { | 
|  | 2488 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | 
|  | 2489 | } | 
|  | 2490 |  | 
|  | 2491 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2492 | /** | 
|  | 2493 | *	netif_oper_up - test if device is operational | 
|  | 2494 | *	@dev: network device | 
|  | 2495 | * | 
|  | 2496 | * Check if the device is operational, i.e. its RFC2863 operational state | 
|  |  | * is UP (UNKNOWN is treated as up for backward compatibility). | 
|  | 2497 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2498 | static inline bool netif_oper_up(const struct net_device *dev) | 
| Eric Dumazet | d94d9fe | 2009-11-04 09:50:58 -0800 | [diff] [blame] | 2499 | { | 
| Stefan Rompf | b00055a | 2006-03-20 17:09:11 -0800 | [diff] [blame] | 2500 | return (dev->operstate == IF_OPER_UP || | 
|  | 2501 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | 
|  | 2502 | } | 
|  | 2503 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 2504 | /** | 
|  | 2505 | *	netif_device_present - is device available or removed | 
|  | 2506 | *	@dev: network device | 
|  | 2507 | * | 
|  | 2508 | * Check that the device has not been removed from the system. | 
|  | 2509 | */ | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2510 | static inline bool netif_device_present(struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2511 | { | 
|  | 2512 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | 
|  | 2513 | } | 
|  | 2514 |  | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 2515 | extern void netif_device_detach(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2516 |  | 
| Denis Vlasenko | 5607943 | 2006-03-29 15:57:29 -0800 | [diff] [blame] | 2517 | extern void netif_device_attach(struct net_device *dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2518 |  | 
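|  |  | /* | 
|  |  |  * Illustrative use only: drivers commonly pair these in their power | 
|  |  |  * management hooks, e.g. netif_device_detach(dev) in ->suspend() and | 
|  |  |  * netif_device_attach(dev) in ->resume(), so the stack stops using a | 
|  |  |  * device that is temporarily unreachable. | 
|  |  |  */ | 
|  |  |  | 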
|  | 2519 | /* | 
|  | 2520 | * Network interface message level settings | 
|  | 2521 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2522 |  | 
|  | 2523 | enum { | 
|  | 2524 | NETIF_MSG_DRV		= 0x0001, | 
|  | 2525 | NETIF_MSG_PROBE		= 0x0002, | 
|  | 2526 | NETIF_MSG_LINK		= 0x0004, | 
|  | 2527 | NETIF_MSG_TIMER		= 0x0008, | 
|  | 2528 | NETIF_MSG_IFDOWN	= 0x0010, | 
|  | 2529 | NETIF_MSG_IFUP		= 0x0020, | 
|  | 2530 | NETIF_MSG_RX_ERR	= 0x0040, | 
|  | 2531 | NETIF_MSG_TX_ERR	= 0x0080, | 
|  | 2532 | NETIF_MSG_TX_QUEUED	= 0x0100, | 
|  | 2533 | NETIF_MSG_INTR		= 0x0200, | 
|  | 2534 | NETIF_MSG_TX_DONE	= 0x0400, | 
|  | 2535 | NETIF_MSG_RX_STATUS	= 0x0800, | 
|  | 2536 | NETIF_MSG_PKTDATA	= 0x1000, | 
|  | 2537 | NETIF_MSG_HW		= 0x2000, | 
|  | 2538 | NETIF_MSG_WOL		= 0x4000, | 
|  | 2539 | }; | 
|  | 2540 |  | 
|  | 2541 | #define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV) | 
|  | 2542 | #define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE) | 
|  | 2543 | #define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK) | 
|  | 2544 | #define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER) | 
|  | 2545 | #define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN) | 
|  | 2546 | #define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP) | 
|  | 2547 | #define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR) | 
|  | 2548 | #define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR) | 
|  | 2549 | #define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED) | 
|  | 2550 | #define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR) | 
|  | 2551 | #define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE) | 
|  | 2552 | #define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS) | 
|  | 2553 | #define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA) | 
|  | 2554 | #define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW) | 
|  | 2555 | #define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL) | 
|  | 2556 |  | 
|  | 2557 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | 
|  | 2558 | { | 
|  | 2559 | /* use default */ | 
|  | 2560 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | 
|  | 2561 | return default_msg_enable_bits; | 
|  | 2562 | if (debug_value == 0)	/* no output */ | 
|  | 2563 | return 0; | 
|  | 2564 | /* set low N bits */ | 
|  | 2565 | return (1 << debug_value) - 1; | 
|  | 2566 | } | 
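|  |  |  | 
|  |  | /* | 
|  |  |  * Illustrative use only: a driver usually derives its msg_enable word from a | 
|  |  |  * module parameter at probe time and then tests it with the netif_msg_*() | 
|  |  |  * helpers above.  The "priv"/"debug" names below are hypothetical. | 
|  |  |  * | 
|  |  |  *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | | 
|  |  |  *					  NETIF_MSG_PROBE | NETIF_MSG_LINK); | 
|  |  |  *	if (netif_msg_link(priv)) | 
|  |  |  *		netdev_info(dev, "link is up\n"); | 
|  |  |  */ | 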
|  | 2567 |  | 
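|  |  | /* | 
|  |  |  * Per-queue transmit locking helpers: __netif_tx_lock() takes the queue's | 
|  |  |  * _xmit_lock and records the owning CPU in xmit_lock_owner so recursion onto | 
|  |  |  * the same queue can be detected; __netif_tx_unlock() clears the owner before | 
|  |  |  * dropping the lock.  The _bh variants additionally disable softirqs. | 
|  |  |  */ | 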
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2568 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2569 | { | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2570 | spin_lock(&txq->_xmit_lock); | 
|  | 2571 | txq->xmit_lock_owner = cpu; | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2572 | } | 
|  | 2573 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2574 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) | 
|  | 2575 | { | 
|  | 2576 | spin_lock_bh(&txq->_xmit_lock); | 
|  | 2577 | txq->xmit_lock_owner = smp_processor_id(); | 
|  | 2578 | } | 
|  | 2579 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2580 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2581 | { | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2582 | bool ok = spin_trylock(&txq->_xmit_lock); | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2583 | if (likely(ok)) | 
|  | 2584 | txq->xmit_lock_owner = smp_processor_id(); | 
|  | 2585 | return ok; | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2586 | } | 
|  | 2587 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2588 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | 
|  | 2589 | { | 
|  | 2590 | txq->xmit_lock_owner = -1; | 
|  | 2591 | spin_unlock(&txq->_xmit_lock); | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2592 | } | 
|  | 2593 |  | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2594 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | 
|  | 2595 | { | 
|  | 2596 | txq->xmit_lock_owner = -1; | 
|  | 2597 | spin_unlock_bh(&txq->_xmit_lock); | 
|  | 2598 | } | 
|  | 2599 |  | 
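|  |  | /* | 
|  |  |  * Record the time of the last transmission on this queue.  The update is | 
|  |  |  * skipped when nobody holds the queue's _xmit_lock (xmit_lock_owner == -1), | 
|  |  |  * e.g. for NETIF_F_LLTX drivers that serialize transmission themselves. | 
|  |  |  */ | 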
| Eric Dumazet | 08baf56 | 2009-05-25 22:58:01 -0700 | [diff] [blame] | 2600 | static inline void txq_trans_update(struct netdev_queue *txq) | 
|  | 2601 | { | 
|  | 2602 | if (txq->xmit_lock_owner != -1) | 
|  | 2603 | txq->trans_start = jiffies; | 
|  | 2604 | } | 
|  | 2605 |  | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2606 | /** | 
|  | 2607 | *	netif_tx_lock - grab network device transmit lock | 
|  | 2608 | *	@dev: network device | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2609 | * | 
|  | 2610 | * Take the global transmit lock and mark all transmit queues frozen. | 
|  | 2611 | */ | 
|  | 2612 | static inline void netif_tx_lock(struct net_device *dev) | 
|  | 2613 | { | 
|  | 2614 | unsigned int i; | 
|  | 2615 | int cpu; | 
|  | 2616 |  | 
|  | 2617 | spin_lock(&dev->tx_global_lock); | 
|  | 2618 | cpu = smp_processor_id(); | 
|  | 2619 | for (i = 0; i < dev->num_tx_queues; i++) { | 
|  | 2620 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
|  | 2621 |  | 
|  | 2622 | /* We are the only thread of execution doing a | 
|  | 2623 | * freeze, but we have to grab the _xmit_lock in | 
|  | 2624 | * order to synchronize with threads which are in | 
|  | 2625 | * the ->hard_start_xmit() handler and already | 
|  | 2626 | * checked the frozen bit. | 
|  | 2627 | */ | 
|  | 2628 | __netif_tx_lock(txq, cpu); | 
|  | 2629 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); | 
|  | 2630 | __netif_tx_unlock(txq); | 
|  | 2631 | } | 
|  | 2632 | } | 
|  | 2633 |  | 
|  | 2634 | static inline void netif_tx_lock_bh(struct net_device *dev) | 
|  | 2635 | { | 
|  | 2636 | local_bh_disable(); | 
|  | 2637 | netif_tx_lock(dev); | 
|  | 2638 | } | 
|  | 2639 |  | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2640 | static inline void netif_tx_unlock(struct net_device *dev) | 
|  | 2641 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2642 | unsigned int i; | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2643 |  | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2644 | for (i = 0; i < dev->num_tx_queues; i++) { | 
|  | 2645 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2646 |  | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2647 | /* No need to grab the _xmit_lock here.  If the | 
|  | 2648 | * queue is not stopped for another reason, we | 
|  | 2649 | * force a schedule. | 
|  | 2650 | */ | 
|  | 2651 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | 
| Krishna Kumar | 7b3d3e4 | 2009-08-29 20:21:21 +0000 | [diff] [blame] | 2652 | netif_schedule_queue(txq); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2653 | } | 
|  | 2654 | spin_unlock(&dev->tx_global_lock); | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2655 | } | 
|  | 2656 |  | 
|  | 2657 | static inline void netif_tx_unlock_bh(struct net_device *dev) | 
|  | 2658 | { | 
| David S. Miller | e8a0464 | 2008-07-17 00:34:19 -0700 | [diff] [blame] | 2659 | netif_tx_unlock(dev); | 
|  | 2660 | local_bh_enable(); | 
| Herbert Xu | 932ff27 | 2006-06-09 12:20:56 -0700 | [diff] [blame] | 2661 | } | 
|  | 2662 |  | 
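|  |  | /* | 
|  |  |  * HARD_TX_LOCK/HARD_TX_UNLOCK wrap __netif_tx_lock()/__netif_tx_unlock(), but | 
|  |  |  * become no-ops for NETIF_F_LLTX devices, which do their own transmit locking | 
|  |  |  * inside their ndo_start_xmit() implementation. | 
|  |  |  */ | 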
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2663 | #define HARD_TX_LOCK(dev, txq, cpu) {			\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2664 | if ((dev->features & NETIF_F_LLTX) == 0) {	\ | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2665 | __netif_tx_lock(txq, cpu);		\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2666 | }						\ | 
|  | 2667 | } | 
|  | 2668 |  | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2669 | #define HARD_TX_UNLOCK(dev, txq) {			\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2670 | if ((dev->features & NETIF_F_LLTX) == 0) {	\ | 
| David S. Miller | c773e84 | 2008-07-08 23:13:53 -0700 | [diff] [blame] | 2671 | __netif_tx_unlock(txq);			\ | 
| Jamal Hadi Salim | 22dd749 | 2007-09-16 14:40:49 -0700 | [diff] [blame] | 2672 | }						\ | 
|  | 2673 | } | 
|  | 2674 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 | static inline void netif_tx_disable(struct net_device *dev) | 
|  | 2676 | { | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2677 | unsigned int i; | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2678 | int cpu; | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2679 |  | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2680 | local_bh_disable(); | 
|  | 2681 | cpu = smp_processor_id(); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2682 | for (i = 0; i < dev->num_tx_queues; i++) { | 
|  | 2683 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2684 |  | 
|  | 2685 | __netif_tx_lock(txq, cpu); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2686 | netif_tx_stop_queue(txq); | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2687 | __netif_tx_unlock(txq); | 
| David S. Miller | fd2ea0a | 2008-07-17 01:56:23 -0700 | [diff] [blame] | 2688 | } | 
| David S. Miller | c3f26a2 | 2008-07-31 16:58:50 -0700 | [diff] [blame] | 2689 | local_bh_enable(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2690 | } | 
|  | 2691 |  | 
| David S. Miller | e308a5d | 2008-07-15 00:13:44 -0700 | [diff] [blame] | 2692 | static inline void netif_addr_lock(struct net_device *dev) | 
|  | 2693 | { | 
|  | 2694 | spin_lock(&dev->addr_list_lock); | 
|  | 2695 | } | 
|  | 2696 |  | 
| Jiri Pirko | 2429f7a | 2012-01-09 06:36:54 +0000 | [diff] [blame] | 2697 | static inline void netif_addr_lock_nested(struct net_device *dev) | 
|  | 2698 | { | 
|  | 2699 | spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); | 
|  | 2700 | } | 
|  | 2701 |  | 
| David S. Miller | e308a5d | 2008-07-15 00:13:44 -0700 | [diff] [blame] | 2702 | static inline void netif_addr_lock_bh(struct net_device *dev) | 
|  | 2703 | { | 
|  | 2704 | spin_lock_bh(&dev->addr_list_lock); | 
|  | 2705 | } | 
|  | 2706 |  | 
|  | 2707 | static inline void netif_addr_unlock(struct net_device *dev) | 
|  | 2708 | { | 
|  | 2709 | spin_unlock(&dev->addr_list_lock); | 
|  | 2710 | } | 
|  | 2711 |  | 
|  | 2712 | static inline void netif_addr_unlock_bh(struct net_device *dev) | 
|  | 2713 | { | 
|  | 2714 | spin_unlock_bh(&dev->addr_list_lock); | 
|  | 2715 | } | 
|  | 2716 |  | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2717 | /* | 
| Jiri Pirko | 31278e7 | 2009-06-17 01:12:19 +0000 | [diff] [blame] | 2718 | * dev_addrs walker. Should be used only for read access. Call with | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2719 | * rcu_read_lock() held. | 
|  | 2720 | */ | 
|  | 2721 | #define for_each_dev_addr(dev, ha) \ | 
| Jiri Pirko | 31278e7 | 2009-06-17 01:12:19 +0000 | [diff] [blame] | 2722 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) | 
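|  |  | /* | 
|  |  |  * For example (illustrative only): | 
|  |  |  * | 
|  |  |  *	struct netdev_hw_addr *ha; | 
|  |  |  * | 
|  |  |  *	rcu_read_lock(); | 
|  |  |  *	for_each_dev_addr(dev, ha) | 
|  |  |  *		pr_debug("addr %pM\n", ha->addr); | 
|  |  |  *	rcu_read_unlock(); | 
|  |  |  */ | 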
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2723 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2724 | /* These functions live elsewhere (historically in drivers/net/net_init.c), but are related */ | 
|  | 2725 |  | 
|  | 2726 | extern void		ether_setup(struct net_device *dev); | 
|  | 2727 |  | 
|  | 2728 | /* Support for loadable net-drivers */ | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 2729 | extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2730 | void (*setup)(struct net_device *), | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 2731 | unsigned int txqs, unsigned int rxqs); | 
| Peter P Waskiewicz Jr | f25f4e4 | 2007-07-06 13:36:20 -0700 | [diff] [blame] | 2732 | #define alloc_netdev(sizeof_priv, name, setup) \ | 
| Tom Herbert | 36909ea | 2011-01-09 19:36:31 +0000 | [diff] [blame] | 2733 | alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) | 
|  | 2734 |  | 
|  | 2735 | #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ | 
|  | 2736 | alloc_netdev_mqs(sizeof_priv, name, setup, count, count) | 
|  | 2737 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2738 | extern int		register_netdev(struct net_device *dev); | 
|  | 2739 | extern void		unregister_netdev(struct net_device *dev); | 
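|  |  |  | 
|  |  | /* | 
|  |  |  * Illustrative allocation/registration sequence for a simple Ethernet driver | 
|  |  |  * ("struct my_priv" is a hypothetical private area): | 
|  |  |  * | 
|  |  |  *	struct net_device *dev; | 
|  |  |  *	int err; | 
|  |  |  * | 
|  |  |  *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup); | 
|  |  |  *	if (!dev) | 
|  |  |  *		return -ENOMEM; | 
|  |  |  *	err = register_netdev(dev); | 
|  |  |  *	if (err) | 
|  |  |  *		free_netdev(dev); | 
|  |  |  */ | 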
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2740 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 2741 | /* General hardware address lists handling functions */ | 
|  | 2742 | extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | 
|  | 2743 | struct netdev_hw_addr_list *from_list, | 
|  | 2744 | int addr_len, unsigned char addr_type); | 
|  | 2745 | extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | 
|  | 2746 | struct netdev_hw_addr_list *from_list, | 
|  | 2747 | int addr_len, unsigned char addr_type); | 
|  | 2748 | extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | 
|  | 2749 | struct netdev_hw_addr_list *from_list, | 
|  | 2750 | int addr_len); | 
|  | 2751 | extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | 
|  | 2752 | struct netdev_hw_addr_list *from_list, | 
|  | 2753 | int addr_len); | 
|  | 2754 | extern void __hw_addr_flush(struct netdev_hw_addr_list *list); | 
|  | 2755 | extern void __hw_addr_init(struct netdev_hw_addr_list *list); | 
|  | 2756 |  | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2757 | /* Functions used for device addresses handling */ | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 2758 | extern int dev_addr_add(struct net_device *dev, const unsigned char *addr, | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2759 | unsigned char addr_type); | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 2760 | extern int dev_addr_del(struct net_device *dev, const unsigned char *addr, | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2761 | unsigned char addr_type); | 
|  | 2762 | extern int dev_addr_add_multiple(struct net_device *to_dev, | 
|  | 2763 | struct net_device *from_dev, | 
|  | 2764 | unsigned char addr_type); | 
|  | 2765 | extern int dev_addr_del_multiple(struct net_device *to_dev, | 
|  | 2766 | struct net_device *from_dev, | 
|  | 2767 | unsigned char addr_type); | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 2768 | extern void dev_addr_flush(struct net_device *dev); | 
|  | 2769 | extern int dev_addr_init(struct net_device *dev); | 
|  | 2770 |  | 
|  | 2771 | /* Functions used for unicast addresses handling */ | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 2772 | extern int dev_uc_add(struct net_device *dev, const unsigned char *addr); | 
|  | 2773 | extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); | 
|  | 2774 | extern int dev_uc_del(struct net_device *dev, const unsigned char *addr); | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 2775 | extern int dev_uc_sync(struct net_device *to, struct net_device *from); | 
| Vlad Yasevich | 4cd729b0 | 2013-04-15 09:54:25 +0000 | [diff] [blame] | 2776 | extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); | 
| Jiri Pirko | a748ee2 | 2010-04-01 21:22:09 +0000 | [diff] [blame] | 2777 | extern void dev_uc_unsync(struct net_device *to, struct net_device *from); | 
|  | 2778 | extern void dev_uc_flush(struct net_device *dev); | 
|  | 2779 | extern void dev_uc_init(struct net_device *dev); | 
| Jiri Pirko | f001fde | 2009-05-05 02:48:28 +0000 | [diff] [blame] | 2780 |  | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 2781 | /* Functions used for multicast addresses handling */ | 
| stephen hemminger | 6b6e272 | 2012-09-17 10:03:26 +0000 | [diff] [blame] | 2782 | extern int dev_mc_add(struct net_device *dev, const unsigned char *addr); | 
|  | 2783 | extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); | 
|  | 2784 | extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); | 
|  | 2785 | extern int dev_mc_del(struct net_device *dev, const unsigned char *addr); | 
|  | 2786 | extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 2787 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); | 
| Vlad Yasevich | 4cd729b0 | 2013-04-15 09:54:25 +0000 | [diff] [blame] | 2788 | extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); | 
| Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 2789 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | 
|  | 2790 | extern void dev_mc_flush(struct net_device *dev); | 
|  | 2791 | extern void dev_mc_init(struct net_device *dev); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2792 |  | 
|  | 2793 | /* Functions used for secondary unicast and multicast support */ | 
|  | 2794 | extern void		dev_set_rx_mode(struct net_device *dev); | 
|  | 2795 | extern void		__dev_set_rx_mode(struct net_device *dev); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2796 | extern int		dev_set_promiscuity(struct net_device *dev, int inc); | 
|  | 2797 | extern int		dev_set_allmulti(struct net_device *dev, int inc); | 
|  | 2798 | extern void		netdev_state_change(struct net_device *dev); | 
| Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 2799 | extern void		netdev_notify_peers(struct net_device *dev); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2800 | extern void		netdev_features_change(struct net_device *dev); | 
|  | 2801 | /* Load a device via the kmod */ | 
|  | 2802 | extern void		dev_load(struct net *net, const char *name); | 
| Ben Hutchings | d775351 | 2010-07-09 09:12:41 +0000 | [diff] [blame] | 2803 | extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | 
|  | 2804 | struct rtnl_link_stats64 *storage); | 
| Eric Dumazet | 77a1abf | 2012-03-05 04:50:09 +0000 | [diff] [blame] | 2805 | extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, | 
|  | 2806 | const struct net_device_stats *netdev_stats); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2807 |  | 
|  | 2808 | extern int		netdev_max_backlog; | 
| Eric Dumazet | 3b098e2 | 2010-05-15 23:57:10 -0700 | [diff] [blame] | 2809 | extern int		netdev_tstamp_prequeue; | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2810 | extern int		weight_p; | 
| Eric Dumazet | 0a14842 | 2011-04-20 09:27:32 +0000 | [diff] [blame] | 2811 | extern int		bpf_jit_enable; | 
| Jiri Pirko | 9ff162a | 2013-01-03 22:48:49 +0000 | [diff] [blame] | 2812 |  | 
|  | 2813 | extern bool netdev_has_upper_dev(struct net_device *dev, | 
|  | 2814 | struct net_device *upper_dev); | 
|  | 2815 | extern bool netdev_has_any_upper_dev(struct net_device *dev); | 
| Veaceslav Falico | 8b5be85 | 2013-08-28 23:25:08 +0200 | [diff] [blame] | 2816 | extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, | 
|  | 2817 | struct list_head **iter); | 
|  | 2818 |  | 
|  | 2819 | /* iterate through upper list, must be called under RCU read lock */ | 
|  | 2820 | #define netdev_for_each_upper_dev_rcu(dev, upper, iter) \ | 
|  | 2821 | for (iter = &(dev)->upper_dev_list, \ | 
|  | 2822 | upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ | 
|  | 2823 | upper; \ | 
|  | 2824 | upper = netdev_upper_get_next_dev_rcu(dev, &(iter))) | 
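|  |  | /* | 
|  |  |  * For example (illustrative only): | 
|  |  |  * | 
|  |  |  *	struct net_device *upper; | 
|  |  |  *	struct list_head *iter; | 
|  |  |  * | 
|  |  |  *	rcu_read_lock(); | 
|  |  |  *	netdev_for_each_upper_dev_rcu(dev, upper, iter) | 
|  |  |  *		pr_debug("upper: %s\n", netdev_name(upper)); | 
|  |  |  *	rcu_read_unlock(); | 
|  |  |  */ | 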
|  | 2825 |  | 
| Jiri Pirko | 9ff162a | 2013-01-03 22:48:49 +0000 | [diff] [blame] | 2826 | extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev); | 
|  | 2827 | extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); | 
|  | 2828 | extern int netdev_upper_dev_link(struct net_device *dev, | 
|  | 2829 | struct net_device *upper_dev); | 
|  | 2830 | extern int netdev_master_upper_dev_link(struct net_device *dev, | 
|  | 2831 | struct net_device *upper_dev); | 
|  | 2832 | extern void netdev_upper_dev_unlink(struct net_device *dev, | 
|  | 2833 | struct net_device *upper_dev); | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2834 | extern int skb_checksum_help(struct sk_buff *skb); | 
| Cong Wang | 12b0004 | 2013-02-05 16:36:38 +0000 | [diff] [blame] | 2835 | extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb, | 
|  | 2836 | netdev_features_t features, bool tx_path); | 
| Pravin B Shelar | 05e8ef4 | 2013-02-14 09:44:55 +0000 | [diff] [blame] | 2837 | extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | 
|  | 2838 | netdev_features_t features); | 
| Cong Wang | 12b0004 | 2013-02-05 16:36:38 +0000 | [diff] [blame] | 2839 |  | 
|  | 2840 | static inline | 
|  | 2841 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | 
|  | 2842 | { | 
|  | 2843 | return __skb_gso_segment(skb, features, true); | 
|  | 2844 | } | 
| Pravin B Shelar | ec5f061 | 2013-03-07 09:28:01 +0000 | [diff] [blame] | 2845 | __be16 skb_network_protocol(struct sk_buff *skb); | 
|  | 2846 |  | 
|  | 2847 | static inline bool can_checksum_protocol(netdev_features_t features, | 
|  | 2848 | __be16 protocol) | 
|  | 2849 | { | 
|  | 2850 | return ((features & NETIF_F_GEN_CSUM) || | 
|  | 2851 | ((features & NETIF_F_V4_CSUM) && | 
|  | 2852 | protocol == htons(ETH_P_IP)) || | 
|  | 2853 | ((features & NETIF_F_V6_CSUM) && | 
|  | 2854 | protocol == htons(ETH_P_IPV6)) || | 
|  | 2855 | ((features & NETIF_F_FCOE_CRC) && | 
|  | 2856 | protocol == htons(ETH_P_FCOE))); | 
|  | 2857 | } | 
| Cong Wang | 12b0004 | 2013-02-05 16:36:38 +0000 | [diff] [blame] | 2858 |  | 
| Herbert Xu | fb286bb | 2005-11-10 13:01:24 -0800 | [diff] [blame] | 2859 | #ifdef CONFIG_BUG | 
|  | 2860 | extern void netdev_rx_csum_fault(struct net_device *dev); | 
|  | 2861 | #else | 
|  | 2862 | static inline void netdev_rx_csum_fault(struct net_device *dev) | 
|  | 2863 | { | 
|  | 2864 | } | 
|  | 2865 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2866 | /* rx skb timestamps */ | 
|  | 2867 | extern void		net_enable_timestamp(void); | 
|  | 2868 | extern void		net_disable_timestamp(void); | 
|  | 2869 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 2870 | #ifdef CONFIG_PROC_FS | 
| Cong Wang | 900ff8c | 2013-02-18 19:20:33 +0000 | [diff] [blame] | 2871 | extern int __init dev_proc_init(void); | 
|  | 2872 | #else | 
|  | 2873 | #define dev_proc_init() 0 | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 2874 | #endif | 
|  | 2875 |  | 
| Jay Vosburgh | b8a9787 | 2008-06-13 18:12:04 -0700 | [diff] [blame] | 2876 | extern int netdev_class_create_file(struct class_attribute *class_attr); | 
|  | 2877 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | 
|  | 2878 |  | 
| Johannes Berg | 0460079 | 2010-08-05 17:45:15 +0200 | [diff] [blame] | 2879 | extern struct kobj_ns_type_operations net_ns_type_operations; | 
|  | 2880 |  | 
| David S. Miller | 3019de1 | 2011-06-06 16:41:33 -0700 | [diff] [blame] | 2881 | extern const char *netdev_drivername(const struct net_device *dev); | 
| Arjan van de Ven | 6579e57 | 2008-07-21 13:31:48 -0700 | [diff] [blame] | 2882 |  | 
| Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 2883 | extern void linkwatch_run_queue(void); | 
|  | 2884 |  | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 2885 | static inline netdev_features_t netdev_get_wanted_features( | 
|  | 2886 | struct net_device *dev) | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 2887 | { | 
|  | 2888 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | 
|  | 2889 | } | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 2890 | netdev_features_t netdev_increment_features(netdev_features_t all, | 
|  | 2891 | netdev_features_t one, netdev_features_t mask); | 
| Eric Dumazet | b0ce350 | 2013-05-16 07:34:53 +0000 | [diff] [blame] | 2892 |  | 
|  | 2893 | /* Allow TSO to be used on stacked devices: | 
|  | 2894 | * performing the GSO segmentation just before the last device | 
|  | 2895 | * is a performance improvement. | 
|  | 2896 | */ | 
|  | 2897 | static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, | 
|  | 2898 | netdev_features_t mask) | 
|  | 2899 | { | 
|  | 2900 | return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); | 
|  | 2901 | } | 
|  | 2902 |  | 
| Michał Mirosław | 6cb6a27 | 2011-04-02 22:48:47 -0700 | [diff] [blame] | 2903 | int __netdev_update_features(struct net_device *dev); | 
| Michał Mirosław | 5455c69 | 2011-02-15 16:59:17 +0000 | [diff] [blame] | 2904 | void netdev_update_features(struct net_device *dev); | 
| Michał Mirosław | afe12cc | 2011-05-07 03:22:17 +0000 | [diff] [blame] | 2905 | void netdev_change_features(struct net_device *dev); | 
| Herbert Xu | 7f353bf | 2007-08-10 15:47:58 -0700 | [diff] [blame] | 2906 |  | 
| Patrick Mullaney | fc4a748 | 2009-12-03 15:59:22 -0800 | [diff] [blame] | 2907 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, | 
|  | 2908 | struct net_device *dev); | 
|  | 2909 |  | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 2910 | netdev_features_t netif_skb_features(struct sk_buff *skb); | 
| Jesse Gross | 58e998c | 2010-10-29 12:14:55 +0000 | [diff] [blame] | 2911 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2912 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) | 
| Herbert Xu | bcd7611 | 2006-06-30 13:36:35 -0700 | [diff] [blame] | 2913 | { | 
| Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 2914 | netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; | 
| Michał Mirosław | 0345e18 | 2011-11-16 14:05:33 +0000 | [diff] [blame] | 2915 |  | 
|  | 2916 | /* check flags correspondence */ | 
|  | 2917 | BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | 
|  | 2918 | BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); | 
|  | 2919 | BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); | 
|  | 2920 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | 
|  | 2921 | BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); | 
|  | 2922 | BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | 
|  | 2923 |  | 
| Herbert Xu | bcd7611 | 2006-06-30 13:36:35 -0700 | [diff] [blame] | 2924 | return (features & feature) == feature; | 
|  | 2925 | } | 
|  | 2926 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2927 | static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2928 | { | 
| Herbert Xu | 278b251 | 2009-06-03 21:20:51 -0700 | [diff] [blame] | 2929 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && | 
| David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2930 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); | 
| Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2931 | } | 
|  | 2932 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2933 | static inline bool netif_needs_gso(struct sk_buff *skb, | 
|  | 2934 | netdev_features_t features) | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 2935 | { | 
| Jesse Gross | fc74121 | 2011-01-09 06:23:32 +0000 | [diff] [blame] | 2936 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || | 
| Yi Zou | cdbee74 | 2012-03-16 23:08:11 +0000 | [diff] [blame] | 2937 | unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && | 
|  | 2938 | (skb->ip_summed != CHECKSUM_UNNECESSARY))); | 
| Herbert Xu | 7967168 | 2006-06-22 02:40:14 -0700 | [diff] [blame] | 2939 | } | 
|  | 2940 |  | 
| Peter P Waskiewicz Jr | 82cc1a7 | 2008-03-21 03:43:19 -0700 | [diff] [blame] | 2941 | static inline void netif_set_gso_max_size(struct net_device *dev, | 
|  | 2942 | unsigned int size) | 
|  | 2943 | { | 
|  | 2944 | dev->gso_max_size = size; | 
|  | 2945 | } | 
|  | 2946 |  | 
| nikolay@redhat.com | 8a7fbfa | 2013-03-12 02:49:01 +0000 | [diff] [blame] | 2947 | static inline bool netif_is_bond_master(struct net_device *dev) | 
|  | 2948 | { | 
|  | 2949 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | 
|  | 2950 | } | 
|  | 2951 |  | 
| David S. Miller | 4d29515 | 2012-03-07 21:02:35 -0500 | [diff] [blame] | 2952 | static inline bool netif_is_bond_slave(struct net_device *dev) | 
| Jiri Pirko | 1765a57 | 2011-02-12 06:48:36 +0000 | [diff] [blame] | 2953 | { | 
|  | 2954 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; | 
|  | 2955 | } | 
|  | 2956 |  | 
| Ben Greear | 3bdc0eb | 2012-02-11 15:39:30 +0000 | [diff] [blame] | 2957 | static inline bool netif_supports_nofcs(struct net_device *dev) | 
|  | 2958 | { | 
|  | 2959 | return dev->priv_flags & IFF_SUPP_NOFCS; | 
|  | 2960 | } | 
|  | 2961 |  | 
| Eric W. Biederman | 505d4f7 | 2008-11-07 22:54:20 -0800 | [diff] [blame] | 2962 | extern struct pernet_operations __net_initdata loopback_net_ops; | 
| Patrick McHardy | b1b67dd | 2009-04-20 04:49:28 +0000 | [diff] [blame] | 2963 |  | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 2964 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ | 
|  | 2965 |  | 
|  | 2966 | /* netdev_printk helpers, similar to dev_printk */ | 
|  | 2967 |  | 
|  | 2968 | static inline const char *netdev_name(const struct net_device *dev) | 
|  | 2969 | { | 
|  | 2970 | if (dev->reg_state != NETREG_REGISTERED) | 
|  | 2971 | return "(unregistered net_device)"; | 
|  | 2972 | return dev->name; | 
|  | 2973 | } | 
|  | 2974 |  | 
| Joe Perches | b9075fa | 2011-10-31 17:11:33 -0700 | [diff] [blame] | 2975 | extern __printf(3, 4) | 
|  | 2976 | int netdev_printk(const char *level, const struct net_device *dev, | 
|  | 2977 | const char *format, ...); | 
|  | 2978 | extern __printf(2, 3) | 
|  | 2979 | int netdev_emerg(const struct net_device *dev, const char *format, ...); | 
|  | 2980 | extern __printf(2, 3) | 
|  | 2981 | int netdev_alert(const struct net_device *dev, const char *format, ...); | 
|  | 2982 | extern __printf(2, 3) | 
|  | 2983 | int netdev_crit(const struct net_device *dev, const char *format, ...); | 
|  | 2984 | extern __printf(2, 3) | 
|  | 2985 | int netdev_err(const struct net_device *dev, const char *format, ...); | 
|  | 2986 | extern __printf(2, 3) | 
|  | 2987 | int netdev_warn(const struct net_device *dev, const char *format, ...); | 
|  | 2988 | extern __printf(2, 3) | 
|  | 2989 | int netdev_notice(const struct net_device *dev, const char *format, ...); | 
|  | 2990 | extern __printf(2, 3) | 
|  | 2991 | int netdev_info(const struct net_device *dev, const char *format, ...); | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 2992 |  | 
| Vasiliy Kulikov | 8909c9a | 2011-03-02 00:33:13 +0300 | [diff] [blame] | 2993 | #define MODULE_ALIAS_NETDEV(device) \ | 
|  | 2994 | MODULE_ALIAS("netdev-" device) | 
|  | 2995 |  | 
| Jim Cromie | b558c96 | 2011-12-19 17:11:18 -0500 | [diff] [blame] | 2996 | #if defined(CONFIG_DYNAMIC_DEBUG) | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 2997 | #define netdev_dbg(__dev, format, args...)			\ | 
|  | 2998 | do {								\ | 
| Jason Baron | ffa10cb | 2011-08-11 14:36:48 -0400 | [diff] [blame] | 2999 | dynamic_netdev_dbg(__dev, format, ##args);		\ | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 3000 | } while (0) | 
| Jim Cromie | b558c96 | 2011-12-19 17:11:18 -0500 | [diff] [blame] | 3001 | #elif defined(DEBUG) | 
|  | 3002 | #define netdev_dbg(__dev, format, args...)			\ | 
|  | 3003 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | 
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 3004 | #else | 
|  | 3005 | #define netdev_dbg(__dev, format, args...)			\ | 
|  | 3006 | ({								\ | 
|  | 3007 | if (0)							\ | 
|  | 3008 | netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | 
|  | 3009 | 0;							\ | 
|  | 3010 | }) | 
|  | 3011 | #endif | 
|  | 3012 |  | 
|  | 3013 | #if defined(VERBOSE_DEBUG) | 
|  | 3014 | #define netdev_vdbg	netdev_dbg | 
|  | 3015 | #else | 
|  | 3016 |  | 
|  | 3017 | #define netdev_vdbg(dev, format, args...)			\ | 
|  | 3018 | ({								\ | 
|  | 3019 | if (0)							\ | 
|  | 3020 | netdev_printk(KERN_DEBUG, dev, format, ##args);	\ | 
|  | 3021 | 0;							\ | 
|  | 3022 | }) | 
|  | 3023 | #endif | 
|  | 3024 |  | 
|  | 3025 | /* | 
|  | 3026 | * netdev_WARN() acts like dev_printk(), but with the key difference | 
|  | 3027 | * of using a WARN/WARN_ON to get the message out, including the | 
|  | 3028 | * file/line information and a backtrace. | 
|  | 3029 | */ | 
|  | 3030 | #define netdev_WARN(dev, format, args...)			\ | 
|  | 3031 | WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args) | 
|  | 3032 |  | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3033 | /* netif printk helpers, similar to netdev_printk */ | 
|  | 3034 |  | 
|  | 3035 | #define netif_printk(priv, type, level, dev, fmt, args...)	\ | 
|  | 3036 | do {					  			\ | 
|  | 3037 | if (netif_msg_##type(priv))				\ | 
|  | 3038 | netdev_printk(level, (dev), fmt, ##args);	\ | 
|  | 3039 | } while (0) | 
|  | 3040 |  | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3041 | #define netif_level(level, priv, type, dev, fmt, args...)	\ | 
|  | 3042 | do {								\ | 
|  | 3043 | if (netif_msg_##type(priv))				\ | 
|  | 3044 | netdev_##level(dev, fmt, ##args);		\ | 
|  | 3045 | } while (0) | 
|  | 3046 |  | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3047 | #define netif_emerg(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3048 | netif_level(emerg, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3049 | #define netif_alert(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3050 | netif_level(alert, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3051 | #define netif_crit(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3052 | netif_level(crit, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3053 | #define netif_err(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3054 | netif_level(err, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3055 | #define netif_warn(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3056 | netif_level(warn, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3057 | #define netif_notice(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3058 | netif_level(notice, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3059 | #define netif_info(priv, type, dev, fmt, args...)		\ | 
| Joe Perches | f45f432 | 2010-06-27 01:02:36 +0000 | [diff] [blame] | 3060 | netif_level(info, priv, type, dev, fmt, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3061 |  | 
| Joe Perches | 0053ea9 | 2012-05-30 07:43:34 +0000 | [diff] [blame] | 3062 | #if defined(CONFIG_DYNAMIC_DEBUG) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3063 | #define netif_dbg(priv, type, netdev, format, args...)		\ | 
|  | 3064 | do {								\ | 
|  | 3065 | if (netif_msg_##type(priv))				\ | 
| Jason Baron | b5fb0a0 | 2011-08-11 14:36:53 -0400 | [diff] [blame] | 3066 | dynamic_netdev_dbg(netdev, format, ##args);	\ | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3067 | } while (0) | 
| Joe Perches | 0053ea9 | 2012-05-30 07:43:34 +0000 | [diff] [blame] | 3068 | #elif defined(DEBUG) | 
|  | 3069 | #define netif_dbg(priv, type, dev, format, args...)		\ | 
|  | 3070 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3071 | #else | 
|  | 3072 | #define netif_dbg(priv, type, dev, format, args...)			\ | 
|  | 3073 | ({									\ | 
|  | 3074 | if (0)								\ | 
|  | 3075 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | 
|  | 3076 | 0;								\ | 
|  | 3077 | }) | 
|  | 3078 | #endif | 
|  | 3079 |  | 
|  | 3080 | #if defined(VERBOSE_DEBUG) | 
| Ben Hutchings | bcfcc45 | 2010-07-02 07:08:44 +0000 | [diff] [blame] | 3081 | #define netif_vdbg	netif_dbg | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3082 | #else | 
|  | 3083 | #define netif_vdbg(priv, type, dev, format, args...)		\ | 
|  | 3084 | ({								\ | 
|  | 3085 | if (0)							\ | 
| Ben Hutchings | a4ed89c | 2010-05-18 06:56:32 +0000 | [diff] [blame] | 3086 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | 
| Joe Perches | b3d95c5 | 2010-02-09 11:49:49 +0000 | [diff] [blame] | 3087 | 0;							\ | 
|  | 3088 | }) | 
|  | 3089 | #endif | 
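|  |  |  | 
|  |  | /* | 
|  |  |  * Illustrative use of the netif_* printk helpers, assuming a driver-private | 
|  |  |  * structure "priv" with an msg_enable field (see netif_msg_init() above): | 
|  |  |  * | 
|  |  |  *	netif_dbg(priv, tx_queued, dev, "queued skb, len %u\n", skb->len); | 
|  |  |  *	netif_err(priv, rx_err, dev, "RX FIFO overflow\n"); | 
|  |  |  */ | 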
| Joe Perches | 571ba42 | 2010-02-09 11:49:47 +0000 | [diff] [blame] | 3090 |  | 
| Cong Wang | 900ff8c | 2013-02-18 19:20:33 +0000 | [diff] [blame] | 3091 | /* | 
|  | 3092 | *	The list of packet types we will receive (as opposed to discard) | 
|  | 3093 | *	and the routines to invoke. | 
|  | 3094 | * | 
|  | 3095 | *	Why 16?  Because with 16 the only overlap we get on a hash of the | 
|  | 3096 | *	low nibble of the protocol value is RARP/SNAP/X.25. | 
|  | 3097 | * | 
|  | 3098 | *      NOTE:  That is no longer true with the addition of VLAN tags.  Not | 
|  | 3099 | *             sure which should go first, but I bet it won't make much | 
|  | 3100 | *             difference if we are running VLANs.  The good news is that | 
|  | 3101 | *             this protocol won't be in the list unless compiled in, so | 
|  | 3102 | *             the average user (w/out VLANs) will not be adversely affected. | 
|  | 3103 | *             --BLG | 
|  | 3104 | * | 
|  | 3105 | *		0800	IP | 
|  | 3106 | *		8100    802.1Q VLAN | 
|  | 3107 | *		0001	802.3 | 
|  | 3108 | *		0002	AX.25 | 
|  | 3109 | *		0004	802.2 | 
|  | 3110 | *		8035	RARP | 
|  | 3111 | *		0005	SNAP | 
|  | 3112 | *		0805	X.25 | 
|  | 3113 | *		0806	ARP | 
|  | 3114 | *		8137	IPX | 
|  | 3115 | *		0009	Localtalk | 
|  | 3116 | *		86DD	IPv6 | 
|  | 3117 | */ | 
|  | 3118 | #define PTYPE_HASH_SIZE	(16) | 
|  | 3119 | #define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1) | 
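|  |  |  | 
|  |  | /* | 
|  |  |  * The receive path typically picks a bucket as | 
|  |  |  * ptype_base[ntohs(type) & PTYPE_HASH_MASK] (see ptype_head() in | 
|  |  |  * net/core/dev.c); ETH_P_ALL taps live on the separate ptype_all list. | 
|  |  |  */ | 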
|  | 3120 |  | 
| Jiri Pirko | 385a154 | 2009-05-27 15:48:07 -0700 | [diff] [blame] | 3121 | #endif	/* _LINUX_NETDEVICE_H */ |