/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

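/*
 * Example (illustrative only, hypothetical driver "foo"): a driver would
 * typically attach its ethtool_ops from its probe routine:
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link	= ethtool_op_get_link,
 *	};
 *	...
 *	SET_ETHTOOL_OPS(netdev, &foo_ethtool_ops);
 */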
extern void netdev_set_default_ethtool_ops(struct net_device *dev,
					   const struct ethtool_ops *ops);

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
#define NET_ADDR_SET		3	/* address is set using
					 * dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in that case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_POLICED	0x03	/* skb is shot by police */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}

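/*
 * Example (illustrative sketch, not a real caller): code invoking a
 * driver's hard_start_xmit() can use dev_xmit_complete() to decide
 * whether it still owns the skb:
 *
 *	rc = ops->ndo_start_xmit(skb, dev);
 *	if (dev_xmit_complete(rc))
 *		return rc;	// skb consumed; do not touch it again
 *	// NETDEV_TX_BUSY/NETDEV_TX_LOCKED: the skb is still ours,
 *	// so requeue it and retry later.
 */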
/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

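/*
 * Example (illustrative sketch, hypothetical driver RX path): because the
 * fields are native words, a driver may bump dev->stats directly and let
 * the core report it (option 3 in the ndo_get_stats64 documentation below):
 *
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += skb->len;
 */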

#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

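/*
 * Example (illustrative sketch, hypothetical driver "foo"): a driver's
 * ndo_set_rx_mode() callback typically walks these lists to program its
 * hardware filters; foo_hw_add_mc_filter() is a made-up helper:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);
 *	}
 */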
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

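/*
 * Example (illustrative sketch): a protocol allocating an output skb
 * reserves LL_RESERVED_SPACE(dev) of headroom so the link-layer header
 * can later be pushed without reallocating the buffer:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */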
struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

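/*
 * Example (illustrative sketch): Ethernet-like devices typically wire
 * these up to the generic helpers from net/ethernet/eth.c; shown here
 * as an assumption, not a complete list of members:
 *
 *	static const struct header_ops foo_header_ops = {
 *		.create	= eth_header,
 *		.parse	= eth_header_parse,
 *	};
 *	...
 *	dev->header_ops = &foo_header_ops;
 */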
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds the netdevice settings configured at boot time.
 * They are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling, similar to a tasklet but with weighting.
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

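/*
 * Example (illustrative sketch, hypothetical "foo" aggregation driver):
 * an rx_handler that redirects every frame to its master device might
 * look like this; foo_master() is a made-up helper:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		skb->dev = foo_master(skb->dev);
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;	// re-run RX on the master
 *	}
 *
 * registered with netdev_rx_handler_register(dev, foo_handle_frame, priv).
 */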
extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

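/*
 * Example (illustrative sketch, hypothetical driver "foo"): the canonical
 * pattern is to mask device interrupts and schedule NAPI from the IRQ
 * handler; foo_disable_irqs() and the priv layout are made up:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_irqs(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */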
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
extern struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
extern void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
extern void napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

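/*
 * Example (illustrative sketch): a driver reconfiguring its RX ring
 * brackets the critical section with the disable/enable pair so the
 * poll routine cannot run concurrently; foo_reinit_rx_ring() is made up:
 *
 *	napi_disable(&priv->napi);
 *	foo_reinit_rx_ring(priv);
 *	napi_enable(&priv->napi);
 */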
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */

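/*
 * Example (illustrative sketch, hypothetical driver "foo"): a typical TX
 * path stops its queue when the descriptor ring fills and wakes it from
 * the completion path, using the netif_tx_* helpers declared later in
 * this header; foo_tx_ring_full() is a made-up predicate:
 *
 *	// in ndo_start_xmit():
 *	if (foo_tx_ring_full(priv)) {
 *		netif_stop_queue(dev);
 *		return NETDEV_TX_BUSY;
 *	}
 *	...
 *	// in the TX completion handler:
 *	if (netif_queue_stopped(dev) && !foo_tx_ring_full(priv))
 *		netif_wake_queue(dev);
 */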
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
				       ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
					((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

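/*
 * Example (illustrative sketch): the socket layer records the flow on
 * every receive so RPS can steer future packets of that flow to the
 * application's CPU; this mirrors what sock_rps_record_flow() does:
 *
 *	rcu_read_lock();
 *	rps_record_sock_flow(rcu_dereference(rps_sock_flow_table),
 *			     sk->sk_rxhash);
 *	rcu_read_unlock();
 */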
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for a device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about the device
 * configured to run the FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when a device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it
 *	should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	"not supported" error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set the network device's bus interface parameters. This
 *	interface is retained for legacy reasons; new devices should use
 *	the bus interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change the MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If the device supports VLAN filtering, this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to set up 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using the LLD for
 *	FCoE so the underlying device can perform whatever needed
 *	configuration or initialization to support acceleration of FCoE
 *	traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using the LLD for
 *	FCoE so the underlying device can perform whatever needed clean-ups
 *	to stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with the Fiber Channel management service as per
 *	the FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World
 *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *	(WWNN) to the FCoE protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. The passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change the device carrier. Soft-devices (like dummy, team,
 *	etc) which do not represent real hardware may define this to allow
 *	their userspace components to manage their virtual carrier state.
 *	Devices that determine carrier state from physical hardware
 *	properties (eg network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
};

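/*
 * Example (illustrative sketch, hypothetical driver "foo"): a minimal
 * virtual device fills in only the hooks it needs and relies on the
 * documented defaults for everything else:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,	// required
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *	...
 *	dev->netdev_ops = &foo_netdev_ops;
 */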
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001065/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066 * The DEVICE structure.
1067 * Actually, this whole structure is a big mistake. It mixes I/O
1068 * data with strictly "high-level" data, and it has to know about
1069 * almost every data structure used in the INET module.
1070 *
1071 * FIXME: cleanup struct net_device such that network protocol info
1072 * moves out.
1073 */
1074
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001075struct net_device {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076
1077 /*
1078 * This is the first field of the "visible" part of this structure
1079 * (i.e. as seen by users in the "Space.c" file). It is the name
Justin P. Mattock724df612010-05-26 09:22:40 -07001080 * of the interface.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 */
1082 char name[IFNAMSIZ];
Mark Grossed771342010-05-06 01:59:26 +02001083
Eric Dumazet91364612012-06-11 06:36:13 +00001084 /* device name hash chain, please keep it close to name[] */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001085 struct hlist_node name_hlist;
Eric Dumazet91364612012-06-11 06:36:13 +00001086
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001087 /* snmp alias */
1088 char *ifalias;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089
1090 /*
1091 * I/O specific fields
1092 * FIXME: Merge these and struct ifmap into one
1093 */
1094 unsigned long mem_end; /* shared mem end */
1095 unsigned long mem_start; /* shared mem start */
1096 unsigned long base_addr; /* device I/O address */
1097 unsigned int irq; /* device IRQ number */
1098
1099 /*
1100 * Some hardware also needs these fields, but they are not
1101 * part of the usual set specified in Space.c.
1102 */
1103
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 unsigned long state;
1105
Pavel Emelianov7562f872007-05-03 15:13:45 -07001106 struct list_head dev_list;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001107 struct list_head napi_list;
Eric Dumazet44a08732009-10-27 07:03:04 +00001108 struct list_head unreg_list;
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001109 struct list_head upper_dev_list; /* List of upper devices */
1110
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111
Michał Mirosław5455c692011-02-15 16:59:17 +00001112 /* currently active device features */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001113 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00001114 /* user-changeable features */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001115 netdev_features_t hw_features;
Michał Mirosław5455c692011-02-15 16:59:17 +00001116 /* user-requested features */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001117 netdev_features_t wanted_features;
Michał Mirosław1aac6262011-04-12 04:07:39 +00001118 /* mask of features inheritable by VLAN devices */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001119 netdev_features_t vlan_features;
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00001120 /* mask of features inherited by encapsulating devices
1121 * This field indicates what encapsulation offloads
1122 * the hardware is capable of doing, and drivers will
1123 * need to set them appropriately.
1124 */
1125 netdev_features_t hw_enc_features;
Simon Horman0d89d202013-05-23 21:02:52 +00001126 /* mask of fetures inheritable by MPLS */
1127 netdev_features_t mpls_features;
Michał Mirosław04ed3e72011-01-24 15:32:47 -08001128
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 /* Interface index. Unique device identifier */
1130 int ifindex;
1131 int iflink;
1132
Rusty Russellc45d2862007-03-28 14:29:08 -07001133 struct net_device_stats stats;
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001134 atomic_long_t rx_dropped; /* packets dropped by the core network.
1135 * Do not use this in drivers.
1136 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137
Johannes Bergb86e0282007-04-26 20:48:23 -07001138#ifdef CONFIG_WIRELESS_EXT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 /* List of functions to handle Wireless Extensions (instead of ioctl).
1140 * See <net/iw_handler.h> for details. Jean II */
1141 const struct iw_handler_def * wireless_handlers;
1142 /* Instance data managed by the core of Wireless Extensions. */
1143 struct iw_public_data * wireless_data;
Johannes Bergb86e0282007-04-26 20:48:23 -07001144#endif
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001145 /* Management operations */
1146 const struct net_device_ops *netdev_ops;
Stephen Hemminger76fd8592006-09-08 11:16:13 -07001147 const struct ethtool_ops *ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001149 /* Hardware header description */
1150 const struct header_ops *header_ops;
1151
Stefan Rompfb00055a2006-03-20 17:09:11 -08001152 unsigned int flags; /* interface flags (a la BSD) */
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001153 unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
1154 * See if.h for definitions. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 unsigned short gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 unsigned short padded; /* How much padding added by alloc_netdev() */
1157
Stefan Rompfb00055a2006-03-20 17:09:11 -08001158 unsigned char operstate; /* RFC2863 operstate */
1159 unsigned char link_mode; /* mapping policy to operstate */
1160
Joe Perchesbdc220d2011-05-09 17:42:46 +00001161 unsigned char if_port; /* Selectable AUI, TP,..*/
1162 unsigned char dma; /* DMA channel */
1163
David S. Millercd7b5392010-05-02 22:27:59 -07001164 unsigned int mtu; /* interface MTU value */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 unsigned short type; /* interface hardware type */
1166 unsigned short hard_header_len; /* hardware hdr length */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
Johannes Bergf5184d22008-05-12 20:48:31 -07001168 /* extra head- and tailroom the hardware may need, but not in all cases
1169 * can this be guaranteed, especially tailroom. Some cases also use
1170 * LL_MAX_HEADER instead to allocate the skb.
1171 */
1172 unsigned short needed_headroom;
1173 unsigned short needed_tailroom;
1174
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 /* Interface address info. */
Jon Wetzela6f9a702005-08-20 17:15:54 -07001176 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
Stefan Assmannc1f79422010-07-22 02:50:21 +00001177 unsigned char addr_assign_type; /* hw address assignment type */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 unsigned char addr_len; /* hardware address length */
David Miller596b9b62011-07-25 00:01:25 +00001179 unsigned char neigh_priv_len;
Narendra Kdffebd22013-06-10 19:34:03 +05301180 unsigned short dev_id; /* Used to differentiate devices
1181 * that share the same link
1182 * layer address
1183 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00001184 spinlock_t addr_list_lock;
Jiri Pirko22bedad32010-04-01 21:22:57 +00001185 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1186 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001187 struct netdev_hw_addr_list dev_addrs; /* list of device
1188 * hw addresses
1189 */
1190#ifdef CONFIG_SYSFS
1191 struct kset *queues_kset;
1192#endif
1193
Joe Perches2d348d12011-07-25 16:17:35 -07001194 bool uc_promisc;
Wang Chen9d45abe2008-06-17 21:12:48 -07001195 unsigned int promiscuity;
1196 unsigned int allmulti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198
1199 /* Protocol specific pointers */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001200
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001201#if IS_ENABLED(CONFIG_VLAN_8021Q)
Jiri Pirko5b9ea6e2011-12-08 04:11:18 +00001202 struct vlan_info __rcu *vlan_info; /* VLAN info */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001203#endif
Ben Hutchings34a430d2011-11-25 14:38:38 +00001204#if IS_ENABLED(CONFIG_NET_DSA)
Ben Hutchingscf50dcc2011-11-25 14:32:52 +00001205 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001206#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 void *atalk_ptr; /* AppleTalk link */
Eric Dumazet95ae6b22010-09-15 04:04:31 +00001208 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
Eric Dumazetfc766e4c2010-10-29 03:09:24 +00001209 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
Eric Dumazet198caec2010-10-24 21:32:05 +00001210 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 void *ax25_ptr; /* AX.25 specific data */
Johannes Berg704232c2007-04-23 12:20:05 -07001212 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1213 assign before registering */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001215/*
Eric Dumazetcd135392010-09-16 02:58:13 +00001216 * Cache lines mostly used on receive path (including eth_type_trans())
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001217 */
Eric Dumazet4dc89132010-08-31 07:40:16 +00001218 unsigned long last_rx; /* Time of last Rx
1219 * This should not be set in
1220 * drivers, unless really needed,
1221 * because the network stack (bonding)
1222 * uses it if/when necessary, to
1223 * avoid dirtying this cache line.
1224 */
1225
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001226 /* Interface address info used in eth_type_trans() */
Jiri Pirkof001fde2009-05-05 02:48:28 +00001227 unsigned char *dev_addr; /* hw address, (before bcast
1228 because most packets are
1229 unicast) */
1230
Tom Herbert0a9627f2010-03-16 08:03:29 +00001231
david decotignyccf5ff62011-11-16 12:15:10 +00001232#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00001233 struct netdev_rx_queue *_rx;
1234
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001235 /* Number of RX queues allocated at register_netdev() time */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001236 unsigned int num_rx_queues;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001237
1238 /* Number of RX queues currently active in device */
1239 unsigned int real_num_rx_queues;
Ben Hutchingsc4454772011-01-19 11:03:53 +00001240
Eric Dumazetdf334542010-03-24 19:13:54 +00001241#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001242
stephen hemminger61391cd2010-11-15 06:38:12 +00001243 rx_handler_func_t __rcu *rx_handler;
1244 void __rcu *rx_handler_data;
David S. Millere8a04642008-07-17 00:34:19 -07001245
Eric Dumazet24824a02010-10-02 06:11:55 +00001246 struct netdev_queue __rcu *ingress_queue;
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001247 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast address */
1248
Eric Dumazetcd135392010-09-16 02:58:13 +00001249
1250/*
1251 * Cache lines mostly used on transmit path
1252 */
David S. Millere8a04642008-07-17 00:34:19 -07001253 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001254
1255 /* Number of TX queues allocated at alloc_netdev_mq() time */
David S. Millere8a04642008-07-17 00:34:19 -07001256 unsigned int num_tx_queues;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001257
1258 /* Number of TX queues currently active in device */
1259 unsigned int real_num_tx_queues;
1260
Patrick McHardyaf356af2009-09-04 06:41:18 +00001261 /* root qdisc from userspace point of view */
1262 struct Qdisc *qdisc;
1263
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 unsigned long tx_queue_len; /* Max frames per queue allowed */
David S. Millerc3f26a22008-07-31 16:58:50 -07001265 spinlock_t tx_global_lock;
Eric Dumazetcd135392010-09-16 02:58:13 +00001266
Tom Herbertbf264142010-11-26 08:36:09 +00001267#ifdef CONFIG_XPS
Eric Dumazeta4177862010-11-28 21:43:02 +00001268 struct xps_dev_maps __rcu *xps_maps;
Tom Herbertbf264142010-11-26 08:36:09 +00001269#endif
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001270#ifdef CONFIG_RFS_ACCEL
1271 /* CPU reverse-mapping for RX completion interrupts, indexed
1272 * by RX queue number. Assigned by driver. This must only be
1273 * set if the ndo_rx_flow_steer operation is defined. */
1274 struct cpu_rmap *rx_cpu_rmap;
1275#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001276
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001277 /* These may be needed for future network-power-down code. */
Eric Dumazet9d214932009-05-17 20:55:16 -07001278
1279 /*
1280 * trans_start here is expensive for high speed devices on SMP,
1281 * please use netdev_queue->trans_start instead.
1282 */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001283 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1284
1285 int watchdog_timeo; /* used by dev_watchdog() */
1286 struct timer_list watchdog_timer;
1287
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 /* Number of references to this device */
Eric Dumazet29b44332010-10-11 10:22:12 +00001289 int __percpu *pcpu_refcnt;
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001290
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 /* delayed register/unregister */
1292 struct list_head todo_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 /* device index hash chain */
1294 struct hlist_node index_hlist;
1295
Eric Dumazete014deb2009-11-17 05:59:21 +00001296 struct list_head link_watch_list;
Herbert Xu572a1032007-05-08 18:34:17 -07001297
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 /* register/unregister state machine */
1299 enum { NETREG_UNINITIALIZED=0,
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07001300 NETREG_REGISTERED, /* completed register_netdevice */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 NETREG_UNREGISTERING, /* called unregister_netdevice */
1302 NETREG_UNREGISTERED, /* completed unregister todo */
1303 NETREG_RELEASED, /* called free_netdev */
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001304 NETREG_DUMMY, /* dummy device for NAPI poll */
Eric Dumazet449f4542011-05-19 12:24:16 +00001305 } reg_state:8;
1306
1307 bool dismantle; /* device is going to be freed */
Patrick McHardya2835762010-02-26 06:34:51 +00001308
1309 enum {
1310 RTNL_LINK_INITIALIZED,
1311 RTNL_LINK_INITIALIZING,
1312 } rtnl_link_state:16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001314 /* Called from unregister, can be used to call free_netdev */
1315 void (*destructor)(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317#ifdef CONFIG_NETPOLL
Cong Wang5fbee842013-01-22 21:29:39 +00001318 struct netpoll_info __rcu *npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319#endif
David S. Millereae792b2008-07-15 03:03:33 -07001320
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001321#ifdef CONFIG_NET_NS
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001322 /* Network namespace this network device is inside */
1323 struct net *nd_net;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001324#endif
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001325
David S. Miller49517042008-05-12 03:29:11 -07001326 /* mid-layer private */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001327 union {
1328 void *ml_priv;
1329 struct pcpu_lstats __percpu *lstats; /* loopback stats */
Eric Dumazet290b8952010-09-27 00:33:35 +00001330 struct pcpu_tstats __percpu *tstats; /* tunnel stats */
Eric Dumazet6d81f412010-09-27 20:50:33 +00001331 struct pcpu_dstats __percpu *dstats; /* dummy stats */
Eric Dumazet26811282012-12-29 16:02:43 +00001332 struct pcpu_vstats __percpu *vstats; /* veth stats */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001333 };
Patrick McHardyeca9eba2008-07-05 21:26:13 -07001334 /* GARP */
Eric Dumazet3cc77ec2010-10-24 21:32:36 +00001335 struct garp_port __rcu *garp_port;
David Wardfebf0182013-02-08 17:17:06 +00001336 /* MRP */
1337 struct mrp_port __rcu *mrp_port;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 /* class/net/name entry */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001340 struct device dev;
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001341 /* space for optional device, statistics, and wireless sysfs groups */
1342 const struct attribute_group *sysfs_groups[4];
Patrick McHardy38f7b872007-06-13 12:03:51 -07001343
1344 /* rtnetlink link ops */
1345 const struct rtnl_link_ops *rtnl_link_ops;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001346
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001347 /* for setting kernel sock attribute on TCP connection setup */
1348#define GSO_MAX_SIZE 65536
1349 unsigned int gso_max_size;
Ben Hutchings30b678d2012-07-30 15:57:00 +00001350#define GSO_MAX_SEGS 65535
1351 u16 gso_max_segs;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001352
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08001353#ifdef CONFIG_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08001354 /* Data Center Bridging netlink ops */
Stephen Hemminger32953542009-10-05 06:01:03 +00001355 const struct dcbnl_rtnl_ops *dcbnl_ops;
Alexander Duyck2f90b862008-11-20 20:52:10 -08001356#endif
John Fastabend4f57c082011-01-17 08:06:04 +00001357 u8 num_tc;
1358 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1359 u8 prio_tc_map[TC_BITMASK + 1];
Alexander Duyck2f90b862008-11-20 20:52:10 -08001360
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001361#if IS_ENABLED(CONFIG_FCOE)
Yi Zou4d288d52009-02-27 14:06:59 -08001362 /* max exchange id for FCoE LRO by ddp */
1363 unsigned int fcoe_ddp_xid;
1364#endif
Neil Horman5bc14212011-11-22 05:10:51 +00001365#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1366 struct netprio_map __rcu *priomap;
1367#endif
Richard Cochranc1f19b52010-07-17 08:49:36 +00001368 /* phy device may attach itself for hardware timestamping */
1369 struct phy_device *phydev;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001370
Eric Dumazet23d3b8b2012-09-05 01:02:56 +00001371 struct lock_class_key *qdisc_tx_busylock;
1372
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001373 /* group the device belongs to */
1374 int group;
Eric Dumazet91364612012-06-11 06:36:13 +00001375
1376 struct pm_qos_request pm_qos_req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377};
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001378#define to_net_dev(d) container_of(d, struct net_device, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
1380#define NETDEV_ALIGN 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
David S. Millere8a04642008-07-17 00:34:19 -07001382static inline
John Fastabend4f57c082011-01-17 08:06:04 +00001383int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1384{
1385 return dev->prio_tc_map[prio & TC_BITMASK];
1386}
1387
1388static inline
1389int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1390{
1391 if (tc >= dev->num_tc)
1392 return -EINVAL;
1393
1394 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1395 return 0;
1396}
1397
1398static inline
1399void netdev_reset_tc(struct net_device *dev)
1400{
1401 dev->num_tc = 0;
1402 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1403 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1404}
1405
1406static inline
1407int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1408{
1409 if (tc >= dev->num_tc)
1410 return -EINVAL;
1411
1412 dev->tc_to_txq[tc].count = count;
1413 dev->tc_to_txq[tc].offset = offset;
1414 return 0;
1415}
1416
1417static inline
1418int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1419{
1420 if (num_tc > TC_MAX_QUEUE)
1421 return -EINVAL;
1422
1423 dev->num_tc = num_tc;
1424 return 0;
1425}
1426
1427static inline
1428int netdev_get_num_tc(struct net_device *dev)
1429{
1430 return dev->num_tc;
1431}
1432
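/* Example (illustrative sketch, all counts made up): a multiqueue driver
 * carving 8 TX queues into two traffic classes, TC0 on queues 0..3 and
 * TC1 on queues 4..7, then steering priority 5 into TC1:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);
 *	netdev_set_tc_queue(dev, 1, 4, 4);
 *	netdev_set_prio_tc_map(dev, 5, 1);
 */
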
1433static inline
David S. Millere8a04642008-07-17 00:34:19 -07001434struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1435 unsigned int index)
1436{
1437 return &dev->_tx[index];
1438}
1439
1440static inline void netdev_for_each_tx_queue(struct net_device *dev,
1441 void (*f)(struct net_device *,
1442 struct netdev_queue *,
1443 void *),
1444 void *arg)
1445{
1446 unsigned int i;
1447
1448 for (i = 0; i < dev->num_tx_queues; i++)
1449 f(dev, &dev->_tx[i], arg);
1450}
1451
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001452extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1453 struct sk_buff *skb);
Alexander Duyck416186f2013-01-10 08:56:51 +00001454extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001455
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001456/*
1457 * Net namespace inlines
1458 */
1459static inline
1460struct net *dev_net(const struct net_device *dev)
1461{
Eric Dumazetc2d9ba92010-06-01 06:51:19 +00001462 return read_pnet(&dev->nd_net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001463}
1464
1465static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -07001466void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001467{
1468#ifdef CONFIG_NET_NS
Denis V. Lunevf3005d72008-04-16 02:02:18 -07001469 release_net(dev->nd_net);
1470 dev->nd_net = hold_net(net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001471#endif
1472}
1473
Lennert Buytenhekcf85d082008-10-07 13:45:02 +00001474static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1475{
1476#ifdef CONFIG_NET_DSA_TAG_DSA
1477 if (dev->dsa_ptr != NULL)
1478 return dsa_uses_dsa_tags(dev->dsa_ptr);
1479#endif
1480
1481 return 0;
1482}
1483
Lennert Buytenhek396138f02008-10-07 13:46:07 +00001484static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1485{
1486#ifdef CONFIG_NET_DSA_TAG_TRAILER
1487 if (dev->dsa_ptr != NULL)
1488 return dsa_uses_trailer_tags(dev->dsa_ptr);
1489#endif
1490
1491 return 0;
1492}
1493
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001494/**
1495 * netdev_priv - access network device private data
1496 * @dev: network device
1497 *
1498 * Get network device private data
1499 */
Patrick McHardy6472ce62007-06-13 12:03:21 -07001500static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00001502 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
1504
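/* Example (illustrative sketch): accessing driver-private state that was
 * co-allocated by alloc_etherdev(sizeof(struct foo_priv)). "struct foo_priv"
 * and foo_open() are hypothetical names, not part of this header.
 *
 *	struct foo_priv {
 *		spinlock_t lock;
 *	};
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		spin_lock_init(&priv->lock);
 *		return 0;
 *	}
 */
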
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505/* Set the sysfs physical device reference for the network logical device;
1506 * if set prior to registration, a symlink is created during initialization.
1507 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001508#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
Marcel Holtmann384912e2009-08-31 21:08:19 +00001510/* Set the sysfs device type for the network logical device to allow
1511 * fine-grained identification of different network device types. For
1512 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1513 */
1514#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1515
Eric Dumazet82dc3c62013-03-05 15:57:22 +00001516/* Default NAPI poll() weight
1517 * Device drivers are strongly advised not to use a bigger value
1518 */
1519#define NAPI_POLL_WEIGHT 64
1520
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07001521/**
1522 * netif_napi_add - initialize a napi context
1523 * @dev: network device
1524 * @napi: napi context
1525 * @poll: polling function
1526 * @weight: default weight
1527 *
1528 * netif_napi_add() must be used to initialize a napi context prior to calling
1529 * *any* of the other napi related functions.
1530 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001531void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1532 int (*poll)(struct napi_struct *, int), int weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001533
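/* Example (illustrative sketch): registering a poll handler at probe time.
 * foo_poll(), foo_clean_rx_ring() and the "foo_priv" container are
 * hypothetical driver names used only for illustration.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx_ring(priv, budget);
 *
 *		if (work_done < budget)
 *			napi_complete(napi);
 *		return work_done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */
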
Alexander Duyckd8156532008-07-08 15:13:05 -07001534/**
1535 * netif_napi_del - remove a napi context
1536 * @napi: napi context
1537 *
1538 * netif_napi_del() removes a napi context from the network device napi list
1539 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001540void netif_napi_del(struct napi_struct *napi);
1541
1542struct napi_gro_cb {
Herbert Xu78a478d2009-05-26 18:50:21 +00001543 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1544 void *frag0;
1545
Herbert Xu74895942009-05-26 18:50:27 +00001546 /* Length of frag0. */
1547 unsigned int frag0_len;
1548
Herbert Xu86911732009-01-29 14:19:50 +00001549 /* This indicates where we are processing relative to skb->data. */
1550 int data_offset;
1551
Herbert Xud565b0a2008-12-15 23:38:52 -08001552 /* This is non-zero if the packet cannot be merged with the new skb. */
1553 int flush;
1554
1555 /* Number of segments aggregated. */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001556 u16 count;
1557
1558 /* This is non-zero if the packet may be of the same flow. */
1559 u8 same_flow;
Herbert Xu5d38a072009-01-04 16:13:40 -08001560
1561 /* Free the skb? */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001562 u8 free;
Eric Dumazetd7e88832012-04-30 08:10:34 +00001563#define NAPI_GRO_FREE 1
1564#define NAPI_GRO_FREE_STOLEN_HEAD 2
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001565
1566 /* jiffies when first packet was created/queued */
1567 unsigned long age;
Eric Dumazet86347242012-10-08 21:38:50 +02001568
1569 /* Used in ipv6_gro_receive() */
1570 int proto;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00001571
1572 /* used in skb_gro_receive() slow path */
1573 struct sk_buff *last;
Herbert Xud565b0a2008-12-15 23:38:52 -08001574};
1575
1576#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
Alexander Duyckd8156532008-07-08 15:13:05 -07001577
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578struct packet_type {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001579 __be16 type; /* This is really htons(ether_type). */
1580 struct net_device *dev; /* NULL is wildcarded here */
1581 int (*func) (struct sk_buff *,
1582 struct net_device *,
1583 struct packet_type *,
1584 struct net_device *);
Eric Leblondc0de08d2012-08-16 22:02:58 +00001585 bool (*id_match)(struct packet_type *ptype,
1586 struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 void *af_packet_priv;
1588 struct list_head list;
1589};
1590
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001591struct offload_callbacks {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1593 netdev_features_t features);
1594 int (*gso_send_check)(struct sk_buff *skb);
1595 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1596 struct sk_buff *skb);
1597 int (*gro_complete)(struct sk_buff *skb);
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001598};
1599
1600struct packet_offload {
1601 __be16 type; /* This is really htons(ether_type). */
1602 struct offload_callbacks callbacks;
1603 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604};
1605
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606#include <linux/notifier.h>
1607
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001608/* netdevice notifier chain. Please remember to update the rtnetlink
1609 * notification exclusion list in rtnetlink_event() when adding new
1610 * types.
1611 */
1612#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1613#define NETDEV_DOWN 0x0002
1614#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1615 detected a hardware crash and restarted
1616 - we can use this e.g. to kick TCP sessions
1617 once done */
1618#define NETDEV_CHANGE 0x0004 /* Notify device state change */
1619#define NETDEV_REGISTER 0x0005
1620#define NETDEV_UNREGISTER 0x0006
1621#define NETDEV_CHANGEMTU 0x0007
1622#define NETDEV_CHANGEADDR 0x0008
1623#define NETDEV_GOING_DOWN 0x0009
1624#define NETDEV_CHANGENAME 0x000A
1625#define NETDEV_FEAT_CHANGE 0x000B
1626#define NETDEV_BONDING_FAILOVER 0x000C
1627#define NETDEV_PRE_UP 0x000D
1628#define NETDEV_PRE_TYPE_CHANGE 0x000E
1629#define NETDEV_POST_TYPE_CHANGE 0x000F
1630#define NETDEV_POST_INIT 0x0010
Eric Dumazet0115e8e2012-08-22 17:19:46 +00001631#define NETDEV_UNREGISTER_FINAL 0x0011
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001632#define NETDEV_RELEASE 0x0012
1633#define NETDEV_NOTIFY_PEERS 0x0013
1634#define NETDEV_JOIN 0x0014
Jiri Pirko42e52bf2013-05-25 04:12:10 +00001635#define NETDEV_CHANGEUPPER 0x0015
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001636
1637extern int register_netdevice_notifier(struct notifier_block *nb);
1638extern int unregister_netdevice_notifier(struct notifier_block *nb);
Jiri Pirko351638e2013-05-28 01:30:21 +00001639
1640struct netdev_notifier_info {
1641 struct net_device *dev;
1642};
1643
Jiri Pirkobe9efd32013-05-28 01:30:22 +00001644struct netdev_notifier_change_info {
1645 struct netdev_notifier_info info; /* must be first */
1646 unsigned int flags_changed;
1647};
1648
Cong Wang75538c22013-05-29 11:30:50 +08001649static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1650 struct net_device *dev)
1651{
1652 info->dev = dev;
1653}
1654
Jiri Pirko351638e2013-05-28 01:30:21 +00001655static inline struct net_device *
1656netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1657{
1658 return info->dev;
1659}
1660
1661extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1662 struct netdev_notifier_info *info);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001663extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1664
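/* Example (illustrative sketch): a minimal netdevice notifier. The names
 * foo_netdev_event() and foo_notifier are hypothetical; the "ptr" argument
 * is unpacked with netdev_notifier_info_to_dev() above.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_CHANGEMTU)
 *			pr_info("%s: MTU changed to %u\n", dev->name, dev->mtu);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 * and, from module init:
 *
 *	register_netdevice_notifier(&foo_notifier);
 */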
1665
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666extern rwlock_t dev_base_lock; /* Device list lock */
1667
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001668extern seqcount_t devnet_rename_seq; /* Device rename seq */
Brian Haleyc91f6df2012-11-26 05:21:08 +00001669
Eric W. Biederman881d9662007-09-17 11:56:21 -07001670
1671#define for_each_netdev(net, d) \
1672 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
Eric W. Biedermandcbccbd42009-11-29 22:25:26 +00001673#define for_each_netdev_reverse(net, d) \
1674 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08001675#define for_each_netdev_rcu(net, d) \
1676 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
Eric W. Biederman881d9662007-09-17 11:56:21 -07001677#define for_each_netdev_safe(net, d, n) \
1678 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1679#define for_each_netdev_continue(net, d) \
1680 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
stephen hemminger254245d2009-11-10 07:54:47 +00001681#define for_each_netdev_continue_rcu(net, d) \
1682 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00001683#define for_each_netdev_in_bond_rcu(bond, slave) \
1684 for_each_netdev_rcu(&init_net, slave) \
1685 if (netdev_master_upper_dev_get_rcu(slave) == bond)
Pavel Emelianov7562f872007-05-03 15:13:45 -07001686#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
1687
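/* Example (illustrative sketch): walking the per-namespace device list with
 * the RCU variant, which must run inside an RCU read-side critical section
 * (the non-RCU variants require RTNL or dev_base_lock instead):
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *	rcu_read_unlock();
 */
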
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001688static inline struct net_device *next_net_device(struct net_device *dev)
1689{
1690 struct list_head *lh;
1691 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07001692
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001693 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001694 lh = dev->dev_list.next;
1695 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1696}
1697
Eric Dumazetce81b762009-11-11 17:34:30 +00001698static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1699{
1700 struct list_head *lh;
1701 struct net *net;
1702
1703 net = dev_net(dev);
Eric Dumazetccf43432011-01-26 18:08:02 +00001704 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
Eric Dumazetce81b762009-11-11 17:34:30 +00001705 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1706}
1707
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001708static inline struct net_device *first_net_device(struct net *net)
1709{
1710 return list_empty(&net->dev_base_head) ? NULL :
1711 net_device_entry(net->dev_base_head.next);
1712}
Pavel Emelianov7562f872007-05-03 15:13:45 -07001713
Eric Dumazetccf43432011-01-26 18:08:02 +00001714static inline struct net_device *first_net_device_rcu(struct net *net)
1715{
1716 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1717
1718 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1719}
1720
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721extern int netdev_boot_setup_check(struct net_device *dev);
1722extern unsigned long netdev_boot_base(const char *prefix, int unit);
Eric Dumazet941666c2010-12-05 01:23:53 +00001723extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1724 const char *hwaddr);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001725extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1726extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727extern void dev_add_pack(struct packet_type *pt);
1728extern void dev_remove_pack(struct packet_type *pt);
1729extern void __dev_remove_pack(struct packet_type *pt);
Vlad Yasevich62532da2012-11-15 08:49:10 +00001730extern void dev_add_offload(struct packet_offload *po);
1731extern void dev_remove_offload(struct packet_offload *po);
1732extern void __dev_remove_offload(struct packet_offload *po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
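/* Example (illustrative sketch): hooking a protocol handler into the RX path
 * with dev_add_pack(). ETH_P_FOO and foo_rcv() are made-up names; real
 * handlers follow the same shape.
 *
 *	static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
 *			   struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type foo_packet_type = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.func = foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */
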
Eric Dumazetbb69ae02010-06-07 11:42:13 +00001734extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1735 unsigned short mask);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001736extern struct net_device *dev_get_by_name(struct net *net, const char *name);
Eric Dumazet72c95282009-10-30 07:11:27 +00001737extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001738extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739extern int dev_alloc_name(struct net_device *dev, const char *name);
1740extern int dev_open(struct net_device *dev);
1741extern int dev_close(struct net_device *dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001742extern void dev_disable_lro(struct net_device *dev);
Michel Machado95603e22012-06-12 10:16:35 +00001743extern int dev_loopback_xmit(struct sk_buff *newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744extern int dev_queue_xmit(struct sk_buff *skb);
1745extern int register_netdevice(struct net_device *dev);
Eric Dumazet44a08732009-10-27 07:03:04 +00001746extern void unregister_netdevice_queue(struct net_device *dev,
1747 struct list_head *head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00001748extern void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00001749static inline void unregister_netdevice(struct net_device *dev)
1750{
1751 unregister_netdevice_queue(dev, NULL);
1752}
1753
Eric Dumazet29b44332010-10-11 10:22:12 +00001754extern int netdev_refcnt_read(const struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755extern void free_netdev(struct net_device *dev);
1756extern void synchronize_net(void);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001757extern int init_dummy_netdev(struct net_device *dev);
1758
Eric W. Biederman881d9662007-09-17 11:56:21 -07001759extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1760extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00001761extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
Nicolas Schichan5dbe7c12013-06-26 17:23:42 +02001762extern int netdev_get_name(struct net *net, char *name, int ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763extern int dev_restart(struct net_device *dev);
1764#ifdef CONFIG_NETPOLL_TRAP
1765extern int netpoll_trap(void);
1766#endif
Herbert Xu86911732009-01-29 14:19:50 +00001767extern int skb_gro_receive(struct sk_buff **head,
1768 struct sk_buff *skb);
1769
1770static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1771{
1772 return NAPI_GRO_CB(skb)->data_offset;
1773}
1774
1775static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1776{
1777 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1778}
1779
1780static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1781{
1782 NAPI_GRO_CB(skb)->data_offset += len;
1783}
1784
Herbert Xua5b1cf22009-05-26 18:50:28 +00001785static inline void *skb_gro_header_fast(struct sk_buff *skb,
1786 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00001787{
Herbert Xu78a478d2009-05-26 18:50:21 +00001788 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00001789}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Herbert Xua5b1cf22009-05-26 18:50:28 +00001791static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1792{
1793 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1794}
1795
1796static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1797 unsigned int offset)
1798{
Herbert Xu17dd7592011-07-27 06:16:28 -07001799 if (!pskb_may_pull(skb, hlen))
1800 return NULL;
1801
Herbert Xua5b1cf22009-05-26 18:50:28 +00001802 NAPI_GRO_CB(skb)->frag0 = NULL;
1803 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu17dd7592011-07-27 06:16:28 -07001804 return skb->data + offset;
Herbert Xua5b1cf22009-05-26 18:50:28 +00001805}
1806
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001807static inline void *skb_gro_mac_header(struct sk_buff *skb)
1808{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001809 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001810}
1811
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001812static inline void *skb_gro_network_header(struct sk_buff *skb)
1813{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001814 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1815 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001816}
1817
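/* The usual pattern in a protocol's gro_receive handler (a sketch modeled on
 * the TCP one): try the fast frag0 path first, then fall back to
 * skb_gro_header_slow() when the header is not fully inside frag0.
 *
 *	off = skb_gro_offset(skb);
 *	hlen = off + sizeof(*th);
 *	th = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		th = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!th))
 *			goto out;
 *	}
 */
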
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001818static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1819 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001820 const void *daddr, const void *saddr,
Eric Dumazet95c96172012-04-15 05:58:06 +00001821 unsigned int len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001822{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02001823 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001824 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001825
1826 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001827}
1828
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001829static inline int dev_parse_header(const struct sk_buff *skb,
1830 unsigned char *haddr)
1831{
1832 const struct net_device *dev = skb->dev;
1833
Patrick McHardy1b833362007-10-18 05:09:28 -07001834 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001835 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001836 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001837}
1838
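/* Example (illustrative sketch): building a link-layer header on an outgoing
 * skb. "dest_mac" is a hypothetical destination address; passing a NULL
 * source lets the device fill in its own dev_addr.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len) < 0)
 *		goto drop;
 */
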
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1840extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1841static inline int unregister_gifconf(unsigned int family)
1842{
1843 return register_gifconf(family, NULL);
1844}
1845
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001846#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04001847#define FLOW_LIMIT_HISTORY (1 << 7) /* power of 2; must not overflow u8 buckets */
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001848struct sd_flow_limit {
1849 u64 count;
1850 unsigned int num_buckets;
1851 unsigned int history_head;
1852 u16 history[FLOW_LIMIT_HISTORY];
1853 u8 buckets[];
1854};
1855
1856extern int netdev_flow_limit_table_len;
1857#endif /* CONFIG_NET_FLOW_LIMIT */
1858
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859/*
Eric Dumazet88751272010-04-19 05:07:33 +00001860 * Incoming packets are placed on per-cpu queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001862struct softnet_data {
David S. Miller37437bb2008-07-16 02:15:04 -07001863 struct Qdisc *output_queue;
Changli Gaoa9cbd582010-04-26 23:06:24 +00001864 struct Qdisc **output_queue_tailp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 struct list_head poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 struct sk_buff *completion_queue;
Changli Gao6e7676c2010-04-27 15:07:33 -07001867 struct sk_buff_head process_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868
Changli Gaodee42872010-05-02 05:42:16 +00001869 /* stats */
David S. Millercd7b5392010-05-02 22:27:59 -07001870 unsigned int processed;
1871 unsigned int time_squeeze;
1872 unsigned int cpu_collision;
1873 unsigned int received_rps;
Changli Gaodee42872010-05-02 05:42:16 +00001874
Changli Gaofd793d82010-04-15 00:16:59 -07001875#ifdef CONFIG_RPS
Eric Dumazet88751272010-04-19 05:07:33 +00001876 struct softnet_data *rps_ipi_list;
1877
1878 /* Elements below can be accessed between CPUs for RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001879 struct call_single_data csd ____cacheline_aligned_in_smp;
Eric Dumazet88751272010-04-19 05:07:33 +00001880 struct softnet_data *rps_ipi_next;
1881 unsigned int cpu;
Tom Herbertfec5e652010-04-16 16:01:27 -07001882 unsigned int input_queue_head;
Tom Herbert76cc8b12010-05-20 18:37:59 +00001883 unsigned int input_queue_tail;
Tom Herbert1e94d722010-03-18 17:45:44 -07001884#endif
Eric Dumazet95c96172012-04-15 05:58:06 +00001885 unsigned int dropped;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001886 struct sk_buff_head input_pkt_queue;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001887 struct napi_struct backlog;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001888
1889#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04001890 struct sd_flow_limit __rcu *flow_limit;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001891#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892};
1893
Tom Herbert76cc8b12010-05-20 18:37:59 +00001894static inline void input_queue_head_incr(struct softnet_data *sd)
Tom Herbertfec5e652010-04-16 16:01:27 -07001895{
1896#ifdef CONFIG_RPS
Tom Herbert76cc8b12010-05-20 18:37:59 +00001897 sd->input_queue_head++;
1898#endif
1899}
1900
1901static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1902 unsigned int *qtail)
1903{
1904#ifdef CONFIG_RPS
1905 *qtail = ++sd->input_queue_tail;
Tom Herbertfec5e652010-04-16 16:01:27 -07001906#endif
1907}
1908
Tom Herbert0a9627f2010-03-16 08:03:29 +00001909DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
David S. Miller37437bb2008-07-16 02:15:04 -07001911extern void __netif_schedule(struct Qdisc *q);
David S. Miller86d804e2008-07-08 23:11:25 -07001912
1913static inline void netif_schedule_queue(struct netdev_queue *txq)
1914{
Tom Herbert734664982011-11-28 16:32:44 +00001915 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
David S. Miller37437bb2008-07-16 02:15:04 -07001916 __netif_schedule(txq->qdisc);
David S. Miller86d804e2008-07-08 23:11:25 -07001917}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001919static inline void netif_tx_schedule_all(struct net_device *dev)
1920{
1921 unsigned int i;
1922
1923 for (i = 0; i < dev->num_tx_queues; i++)
1924 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1925}
1926
Dave Jonesd29f7492008-07-22 14:09:06 -07001927static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1928{
Tom Herbert734664982011-11-28 16:32:44 +00001929 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07001930}
1931
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001932/**
1933 * netif_start_queue - allow transmit
1934 * @dev: network device
1935 *
1936 * Allow upper layers to call the device hard_start_xmit routine.
1937 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938static inline void netif_start_queue(struct net_device *dev)
1939{
David S. Millere8a04642008-07-17 00:34:19 -07001940 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941}
1942
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001943static inline void netif_tx_start_all_queues(struct net_device *dev)
1944{
1945 unsigned int i;
1946
1947 for (i = 0; i < dev->num_tx_queues; i++) {
1948 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1949 netif_tx_start_queue(txq);
1950 }
1951}
1952
David S. Miller79d16382008-07-08 23:14:46 -07001953static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954{
1955#ifdef CONFIG_NETPOLL_TRAP
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07001956 if (netpoll_trap()) {
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001957 netif_tx_start_queue(dev_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 return;
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07001959 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960#endif
Tom Herbert734664982011-11-28 16:32:44 +00001961 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
David S. Miller37437bb2008-07-16 02:15:04 -07001962 __netif_schedule(dev_queue->qdisc);
David S. Miller79d16382008-07-08 23:14:46 -07001963}
1964
Dave Jonesd29f7492008-07-22 14:09:06 -07001965/**
1966 * netif_wake_queue - restart transmit
1967 * @dev: network device
1968 *
1969 * Allow upper layers to call the device hard_start_xmit routine.
1970 * Used for flow control when transmit resources are available.
1971 */
David S. Miller79d16382008-07-08 23:14:46 -07001972static inline void netif_wake_queue(struct net_device *dev)
1973{
David S. Millere8a04642008-07-17 00:34:19 -07001974 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975}
1976
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001977static inline void netif_tx_wake_all_queues(struct net_device *dev)
1978{
1979 unsigned int i;
1980
1981 for (i = 0; i < dev->num_tx_queues; i++) {
1982 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1983 netif_tx_wake_queue(txq);
1984 }
1985}
1986
Dave Jonesd29f7492008-07-22 14:09:06 -07001987static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1988{
Guillaume Chazarain18543a62010-11-06 06:39:32 +00001989 if (WARN_ON(!dev_queue)) {
Joe Perches256ee432011-03-01 07:06:12 +00001990 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
Guillaume Chazarain18543a62010-11-06 06:39:32 +00001991 return;
1992 }
Tom Herbert734664982011-11-28 16:32:44 +00001993 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07001994}
1995
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001996/**
1997 * netif_stop_queue - stop the transmit queue
1998 * @dev: network device
1999 *
2000 * Stop upper layers calling the device hard_start_xmit routine.
2001 * Used for flow control when transmit resources are unavailable.
2002 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003static inline void netif_stop_queue(struct net_device *dev)
2004{
David S. Millere8a04642008-07-17 00:34:19 -07002005 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006}
2007
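/* Example (illustrative sketch): classic flow control in a driver's
 * ndo_start_xmit(). foo_tx_ring_full() and foo_post_to_hw() are hypothetical;
 * the TX completion handler wakes the queue again with netif_wake_queue().
 *
 *	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		foo_post_to_hw(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 */
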
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002008static inline void netif_tx_stop_all_queues(struct net_device *dev)
2009{
2010 unsigned int i;
2011
2012 for (i = 0; i < dev->num_tx_queues; i++) {
2013 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2014 netif_tx_stop_queue(txq);
2015 }
2016}
2017
David S. Miller4d295152012-03-07 21:02:35 -05002018static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07002019{
Tom Herbert734664982011-11-28 16:32:44 +00002020 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002021}
2022
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002023/**
2024 * netif_queue_stopped - test if transmit queue is flowblocked
2025 * @dev: network device
2026 *
2027 * Test if transmit queue on device is currently unable to send.
2028 */
David S. Miller4d295152012-03-07 21:02:35 -05002029static inline bool netif_queue_stopped(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030{
David S. Millere8a04642008-07-17 00:34:19 -07002031 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032}
2033
David S. Miller4d295152012-03-07 21:02:35 -05002034static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
David S. Millerc3f26a22008-07-31 16:58:50 -07002035{
Tom Herbert734664982011-11-28 16:32:44 +00002036 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2037}
2038
David S. Miller4d295152012-03-07 21:02:35 -05002039static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
Tom Herbert734664982011-11-28 16:32:44 +00002040{
2041 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2042}
2043
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002044static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2045 unsigned int bytes)
2046{
Tom Herbert114cf582011-11-28 16:33:09 +00002047#ifdef CONFIG_BQL
2048 dql_queued(&dev_queue->dql, bytes);
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002049
2050 if (likely(dql_avail(&dev_queue->dql) >= 0))
2051 return;
2052
2053 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2054
2055 /*
2056 * The XOFF flag must be set before checking the dql_avail below,
2057 * because in netdev_tx_completed_queue we update the dql_completed
2058 * before checking the XOFF flag.
2059 */
2060 smp_mb();
2061
2062 /* check again in case another CPU has just made room avail */
2063 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2064 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002065#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002066}
2067
2068static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2069{
2070 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2071}
2072
2073static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
Eric Dumazet95c96172012-04-15 05:58:06 +00002074 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002075{
Tom Herbert114cf582011-11-28 16:33:09 +00002076#ifdef CONFIG_BQL
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002077 if (unlikely(!bytes))
2078 return;
2079
2080 dql_completed(&dev_queue->dql, bytes);
2081
2082 /*
2083 * Without the memory barrier there is a small possibility that
2084 * netdev_tx_sent_queue will miss the update and cause the queue to
2085 * be stopped forever
2086 */
2087 smp_mb();
2088
2089 if (dql_avail(&dev_queue->dql) < 0)
2090 return;
2091
2092 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2093 netif_schedule_queue(dev_queue);
Tom Herbert114cf582011-11-28 16:33:09 +00002094#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002095}
2096
2097static inline void netdev_completed_queue(struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00002098 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002099{
2100 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2101}
2102
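/* Example (illustrative sketch): pairing the BQL hooks in a hypothetical
 * multiqueue driver. Bytes are reported when descriptors are posted and
 * credited back from the TX completion path; pkts_done/bytes_done stand for
 * whatever the completion handler actually reclaimed.
 *
 * In ndo_start_xmit(), after posting the skb to the hardware ring:
 *
 *	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 *
 * In the TX completion handler:
 *
 *	netdev_tx_completed_queue(netdev_get_tx_queue(dev, queue),
 *				  pkts_done, bytes_done);
 */
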
2103static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2104{
Tom Herbert114cf582011-11-28 16:33:09 +00002105#ifdef CONFIG_BQL
Alexander Duyck5c490352012-02-07 02:29:01 +00002106 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002107 dql_reset(&q->dql);
2108#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002109}
2110
2111static inline void netdev_reset_queue(struct net_device *dev_queue)
2112{
2113 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
David S. Millerc3f26a22008-07-31 16:58:50 -07002114}
2115
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002116/**
2117 * netif_running - test if up
2118 * @dev: network device
2119 *
2120 * Test if the device has been brought up.
2121 */
David S. Miller4d295152012-03-07 21:02:35 -05002122static inline bool netif_running(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123{
2124 return test_bit(__LINK_STATE_START, &dev->state);
2125}
2126
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002127/*
2128 * Routines to manage the subqueues on a device. We only need start,
2129 * stop, and a check if it's stopped. All other device management is
2130 * done at the overall netdevice level.
2131 * There is also a test for whether the device is multiqueue.
2132 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002133
2134/**
2135 * netif_start_subqueue - allow sending packets on subqueue
2136 * @dev: network device
2137 * @queue_index: sub queue index
2138 *
2139 * Start individual transmit queue of a device with multiple transmit queues.
2140 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002141static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2142{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002143 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002144
2145 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002146}
2147
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002148/**
2149 * netif_stop_subqueue - stop sending packets on subqueue
2150 * @dev: network device
2151 * @queue_index: sub queue index
2152 *
2153 * Stop individual transmit queue of a device with multiple transmit queues.
2154 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002155static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2156{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002157 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002158#ifdef CONFIG_NETPOLL_TRAP
2159 if (netpoll_trap())
2160 return;
2161#endif
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002162 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002163}
2164
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002165/**
2166 * netif_subqueue_stopped - test status of subqueue
2167 * @dev: network device
2168 * @queue_index: sub queue index
2169 *
2170 * Check individual transmit queue of a device with multiple transmit queues.
2171 */
David S. Miller4d295152012-03-07 21:02:35 -05002172static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2173 u16 queue_index)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002174{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002175 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002176
2177 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002178}
2179
David S. Miller4d295152012-03-07 21:02:35 -05002180static inline bool netif_subqueue_stopped(const struct net_device *dev,
2181 struct sk_buff *skb)
Pavel Emelyanov668f8952007-10-21 17:01:56 -07002182{
2183 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2184}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002185
2186/**
2187 * netif_wake_subqueue - allow sending packets on subqueue
2188 * @dev: network device
2189 * @queue_index: sub queue index
2190 *
2191 * Resume individual transmit queue of a device with multiple transmit queues.
2192 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002193static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2194{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002195 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002196#ifdef CONFIG_NETPOLL_TRAP
2197 if (netpoll_trap())
2198 return;
2199#endif
Tom Herbert734664982011-11-28 16:32:44 +00002200 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
David S. Miller37437bb2008-07-16 02:15:04 -07002201 __netif_schedule(txq->qdisc);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002202}
2203
Alexander Duyck537c00d2013-01-10 08:57:02 +00002204#ifdef CONFIG_XPS
Alexander Duyck537c00d2013-01-10 08:57:02 +00002205extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
2206 u16 index);
2207#else
2208static inline int netif_set_xps_queue(struct net_device *dev,
2209 struct cpumask *mask,
2210 u16 index)
2211{
2212 return 0;
2213}
2214#endif
2215
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002216/*
2217 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2218 * as a distribution range limit for the returned value.
2219 */
2220static inline u16 skb_tx_hash(const struct net_device *dev,
2221 const struct sk_buff *skb)
2222{
2223 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2224}
2225
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002226/**
2227 * netif_is_multiqueue - test if device has multiple transmit queues
2228 * @dev: network device
2229 *
2230 * Check if device has multiple transmit queues
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002231 */
David S. Miller4d295152012-03-07 21:02:35 -05002232static inline bool netif_is_multiqueue(const struct net_device *dev)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002233{
Eric Dumazeta02cec22010-09-22 20:43:57 +00002234 return dev->num_tx_queues > 1;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002235}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
Tom Herberte6484932010-10-18 18:04:39 +00002237extern int netif_set_real_num_tx_queues(struct net_device *dev,
2238 unsigned int txq);
John Fastabendf0796d52010-07-01 13:21:57 +00002239
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002240#ifdef CONFIG_RPS
2241extern int netif_set_real_num_rx_queues(struct net_device *dev,
2242 unsigned int rxq);
2243#else
2244static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2245 unsigned int rxq)
2246{
2247 return 0;
2248}
2249#endif
2250
Ben Hutchings3171d022010-09-27 08:24:49 +00002251static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2252 const struct net_device *from_dev)
2253{
Jiri Pirkoee6ae1a2012-07-20 02:28:46 +00002254 int err;
2255
2256 err = netif_set_real_num_tx_queues(to_dev,
2257 from_dev->real_num_tx_queues);
2258 if (err)
2259 return err;
Ben Hutchings3171d022010-09-27 08:24:49 +00002260#ifdef CONFIG_RPS
2261 return netif_set_real_num_rx_queues(to_dev,
2262 from_dev->real_num_rx_queues);
2263#else
2264 return 0;
2265#endif
2266}
2267
Yuval Mintz16917b82012-07-01 03:18:50 +00002268#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2269extern int netif_get_num_default_rss_queues(void);
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271/* Use this variant when it is known for sure that it
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07002272 * is executing from hardware interrupt context or with hardware interrupts
2273 * disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002275extern void dev_kfree_skb_irq(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
2277/* Use this variant in places where it could be invoked
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07002278 * from either hardware interrupt or other context, with hardware interrupts
2279 * either disabled or enabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002281extern void dev_kfree_skb_any(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
				     struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
extern struct sk_buff *napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
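
/*
 * Example (illustrative sketch): handing received packets to the stack
 * from a NAPI poll routine via GRO.  "foo_rx_poll" and "foo_dequeue_rx"
 * are hypothetical; the point is that napi_gro_receive() replaces
 * netif_receive_skb() in NAPI drivers that want GRO.
 *
 *	static int foo_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_dequeue_rx(napi)) != NULL) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */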

extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

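/*
 * Example (illustrative sketch): how an upper device such as a bridge or
 * bonding driver hooks a slave's receive path.  Both calls are made under
 * rtnl_lock; "foo_handle_frame" is a hypothetical rx_handler_func_t and
 * "foo" a hypothetical private pointer.
 *
 *	err = netdev_rx_handler_register(slave_dev, foo_handle_frame, foo);
 *	if (err)
 *		return err;
 *	...
 *	netdev_rx_handler_unregister(slave_dev);
 */
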
extern bool dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned int dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned int);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
				    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
			       struct sockaddr *);
extern int dev_change_carrier(struct net_device *,
			      bool new_carrier);
extern int dev_hard_start_xmit(struct sk_buff *skb,
			       struct net_device *dev,
			       struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
			   struct sk_buff *skb);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	this_cpu_inc(*dev->pcpu_refcnt);
}
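
/*
 * Example (illustrative sketch): pinning a device across asynchronous
 * work.  The reference only keeps the struct net_device memory alive;
 * it does not keep the interface up or registered.
 *
 *	dev_hold(dev);
 *	schedule_work(&priv->work);
 *
 * ... and in the work handler, once dev is no longer needed:
 *
 *	dev_put(dev);
 */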

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_init_dev(struct net_device *dev);
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 *	Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

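/*
 * Example (illustrative sketch): a typical link-state interrupt or
 * polling routine propagating PHY status to the stack.  "foo_link_up"
 * is a hypothetical helper reading hardware state.
 *
 *	if (foo_link_up(priv)) {
 *		if (!netif_carrier_ok(dev))
 *			netif_carrier_on(dev);
 *	} else {
 *		if (netif_carrier_ok(dev))
 *			netif_carrier_off(dev);
 *	}
 */
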
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863).
 *
 *	The dormant state indicates that the relevant interface is not
 *	actually in a condition to pass packets (i.e., it is not 'up') but is
 *	in a "pending" state, waiting for some external event.  For "on-demand"
 *	interfaces, this new state identifies the situation where the
 *	interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 *	Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if the device is dormant.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 *	Check if the device is operational.
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
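
/*
 * Example (illustrative sketch): the common module-parameter pattern for
 * message level control.  "debug" is a hypothetical module parameter;
 * -1 selects the driver's default bits.
 *
 *	static int debug = -1;
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *	if (netif_msg_probe(priv))
 *		netdev_info(dev, "probe complete\n");
 */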

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
	bool ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
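
/*
 * Example (illustrative sketch): serializing against the transmit path
 * for a single queue, e.g. when reclaiming Tx descriptors outside the
 * normal completion path.  Queue 0 is chosen arbitrarily here.
 *
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *	__netif_tx_lock_bh(txq);
 *	... walk and reclaim the Tx ring ...
 *	__netif_tx_unlock_bh(txq);
 */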

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 *	Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
					   void (*setup)(struct net_device *),
					   unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);

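/*
 * Example (illustrative sketch): the usual allocate/register/unregister
 * life cycle for an Ethernet-like device with four Tx and four Rx queues.
 * "struct foo_priv" and the error label are hypothetical.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err)
 *		goto err_free;
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */
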
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
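
/*
 * Example (illustrative sketch): a stacked device propagating its address
 * lists to the underlying real device from its .ndo_set_rx_mode hook,
 * the pattern used by VLAN and similar drivers.  "foo_set_rx_mode",
 * "foo_priv" and "real_dev" are hypothetical.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *real_dev = foo_priv(dev)->real_dev;
 *
 *		dev_uc_sync(real_dev, dev);
 *		dev_mc_sync(real_dev, dev);
 *	}
 */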

/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_notify_peers(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats);

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;

extern bool netdev_has_upper_dev(struct net_device *dev,
				 struct net_device *upper_dev);
extern bool netdev_has_any_upper_dev(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
extern int netdev_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev);
extern int netdev_master_upper_dev_link(struct net_device *dev,
					struct net_device *upper_dev);
extern void netdev_upper_dev_unlink(struct net_device *dev,
				    struct net_device *upper_dev);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features, bool tx_path);
extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
					   netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb);

static inline bool can_checksum_protocol(netdev_features_t features,
					 __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}
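
/*
 * Example (illustrative sketch): the software-fallback pattern used on
 * the transmit path.  If the device cannot checksum this protocol, the
 * checksum is resolved in software before handing the skb to hardware.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
 *		if (skb_checksum_help(skb))
 *			goto drop;
 *	}
 */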

#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
	struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask);

/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
							netdev_features_t mask)
{
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state != NETREG_REGISTERED)
		return "(unregistered net_device)";
	return dev->name;
}

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);

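/*
 * Example (illustrative sketch): these helpers prefix messages with the
 * driver and device name, so drivers should prefer them over raw
 * printk().  "speed" is a hypothetical local variable.
 *
 *	netdev_err(dev, "DMA mapping failed\n");
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */
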
#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16?  Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *        sure which should go first, but I bet it won't make much
 *        difference if we are running VLANs.  The good news is that
 *        this protocol won't be in the list unless compiled in, so
 *        the average user (w/out VLANs) will not be adversely affected.
 *        --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
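
/*
 * Example (illustrative sketch): how a packet type's hash bucket is
 * derived from the low nibble of its EtherType, matching the table
 * above.  "pt" is a hypothetical struct packet_type pointer and
 * "ptype_base" the bucket array maintained in net/core/dev.c.
 *
 *	struct list_head *head =
 *		&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 */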

#endif	/* _LINUX_NETDEVICE_H */