blob: c8e388e5fcccfa604d087ce33ccee5ace528e598 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Definitions for the Interfaces handler.
7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93
9 *
Jesper Juhl02c30a82005-05-05 16:16:16 -070010 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
Alan Cox113aa832008-10-13 19:01:08 -070014 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 * Moved to /usr/include/linux for NET3
24 */
25#ifndef _LINUX_NETDEVICE_H
26#define _LINUX_NETDEVICE_H
27
Jean Pihete8db0be2011-08-25 15:35:03 +020028#include <linux/pm_qos.h>
Al Virod7fe0f22006-12-03 23:15:30 -050029#include <linux/timer.h>
Paul Gortmaker187f1882011-11-23 20:12:59 -050030#include <linux/bug.h>
Stephen Hemmingerbea33482007-10-03 16:41:36 -070031#include <linux/delay.h>
Arun Sharma600634972011-07-26 16:09:06 -070032#include <linux/atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <asm/cache.h>
34#include <asm/byteorder.h>
35
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <linux/percpu.h>
David S. Miller4d5b78c2009-05-06 16:52:51 -070037#include <linux/rculist.h>
Chris Leechdb217332006-06-17 21:24:58 -070038#include <linux/dmaengine.h>
Stephen Hemmingerbea33482007-10-03 16:41:36 -070039#include <linux/workqueue.h>
Tom Herbert114cf582011-11-28 16:33:09 +000040#include <linux/dynamic_queue_limits.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Patrick McHardyb1b67dd2009-04-20 04:49:28 +000042#include <linux/ethtool.h>
Daniel Lezcanoa050c332007-09-12 14:57:09 +020043#include <net/net_namespace.h>
Lennert Buytenhekcf85d082008-10-07 13:45:02 +000044#include <net/dsa.h>
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -080045#ifdef CONFIG_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -080046#include <net/dcbnl.h>
47#endif
Neil Horman5bc14212011-11-22 05:10:51 +000048#include <net/netprio_cgroup.h>
Daniel Lezcanoa050c332007-09-12 14:57:09 +020049
Michał Mirosława59e2ec2011-11-15 15:29:55 +000050#include <linux/netdev_features.h>
John Fastabend77162022012-04-15 06:43:56 +000051#include <linux/neighbour.h>
David Howells607ca462012-10-13 10:46:48 +010052#include <uapi/linux/netdevice.h>
Michał Mirosława59e2ec2011-11-15 15:29:55 +000053
Jeff Moyer115c1d62005-06-22 22:05:31 -070054struct netpoll_info;
Paul Gortmaker313162d2012-01-30 11:46:54 -050055struct device;
Richard Cochranc1f19b52010-07-17 08:49:36 +000056struct phy_device;
Johannes Berg704232c2007-04-23 12:20:05 -070057/* 802.11 specific */
58struct wireless_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Joe Perchesf629d202013-09-26 14:48:15 -070060void netdev_set_default_ethtool_ops(struct net_device *dev,
61 const struct ethtool_ops *ops);
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +000062
Jarek Poplawski9a1654b2009-11-15 07:20:12 +000063/* Backlog congestion levels */
64#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
65#define NET_RX_DROP 1 /* packet dropped */
66
Patrick McHardy572a9d72009-11-10 06:14:14 +000067/*
68 * Transmit return codes: transmit return codes originate from three different
69 * namespaces:
70 *
71 * - qdisc return codes
72 * - driver transmit return codes
73 * - errno values
74 *
75 * Drivers are allowed to return any one of those in their hard_start_xmit()
76 * function. Real network devices commonly used with qdiscs should only return
77 * the driver transmit return codes though - when qdiscs are used, the actual
78 * transmission happens asynchronously, so the value is not propagated to
79 * higher layers. Virtual network devices transmit synchronously, in this case
80 * the driver transmit return codes are consumed by dev_queue_xmit(), all
81 * others are propagated to higher layers.
82 */
83
84/* qdisc ->enqueue() return codes. */
85#define NET_XMIT_SUCCESS 0x00
Jarek Poplawski9a1654b2009-11-15 07:20:12 +000086#define NET_XMIT_DROP 0x01 /* skb dropped */
87#define NET_XMIT_CN 0x02 /* congestion notification */
88#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
89#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Gerrit Renkerb9df3cb2006-11-14 11:21:36 -020091/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
92 * indicates that the device will soon be dropping packets, or already drops
93 * some packets of the same priority; prompting us to send less aggressively. */
Patrick McHardy572a9d72009-11-10 06:14:14 +000094#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
96
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +000097/* Driver transmit return codes */
Jarek Poplawski9a1654b2009-11-15 07:20:12 +000098#define NETDEV_TX_MASK 0xf0
Patrick McHardy572a9d72009-11-10 06:14:14 +000099
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000100enum netdev_tx {
Patrick McHardy572a9d72009-11-10 06:14:14 +0000101 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
Jarek Poplawski9a1654b2009-11-15 07:20:12 +0000102 NETDEV_TX_OK = 0x00, /* driver took care of packet */
103 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
104 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000105};
106typedef enum netdev_tx netdev_tx_t;
107
Jarek Poplawski9a1654b2009-11-15 07:20:12 +0000108/*
109 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
110 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
111 */
112static inline bool dev_xmit_complete(int rc)
113{
114 /*
115 * Positive cases with an skb consumed by a driver:
116 * - successful transmission (rc == NETDEV_TX_OK)
117 * - error while transmitting (rc < 0)
118 * - error while queueing to a different device (rc & NET_XMIT_MASK)
119 */
120 if (likely(rc < NET_XMIT_MASK))
121 return true;
122
123 return false;
124}
125
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126/*
127 * Compute the worst case header length according to the protocols
128 * used.
129 */
Graf Yangfe2918b2009-02-05 21:26:19 -0800130
Ben Hutchingsd11ead72011-11-25 14:40:26 +0000131#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
David S. Miller8388e3d2008-05-12 20:17:33 -0700132# if defined(CONFIG_MAC80211_MESH)
133# define LL_MAX_HEADER 128
134# else
135# define LL_MAX_HEADER 96
136# endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137#else
David S. Miller8388e3d2008-05-12 20:17:33 -0700138# define LL_MAX_HEADER 32
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139#endif
140
Ben Hutchingsd11ead72011-11-25 14:40:26 +0000141#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
142 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143#define MAX_HEADER LL_MAX_HEADER
144#else
145#define MAX_HEADER (LL_MAX_HEADER + 48)
146#endif
147
148/*
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +0000149 * Old network device statistics. Fields are native words
150 * (unsigned long) so they can be read and written atomically.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 */
Graf Yangfe2918b2009-02-05 21:26:19 -0800152
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800153struct net_device_stats {
Ben Hutchings3cfde792010-07-09 09:11:52 +0000154 unsigned long rx_packets;
155 unsigned long tx_packets;
156 unsigned long rx_bytes;
157 unsigned long tx_bytes;
158 unsigned long rx_errors;
159 unsigned long tx_errors;
160 unsigned long rx_dropped;
161 unsigned long tx_dropped;
162 unsigned long multicast;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 unsigned long collisions;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164 unsigned long rx_length_errors;
Ben Hutchings3cfde792010-07-09 09:11:52 +0000165 unsigned long rx_over_errors;
166 unsigned long rx_crc_errors;
167 unsigned long rx_frame_errors;
168 unsigned long rx_fifo_errors;
169 unsigned long rx_missed_errors;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170 unsigned long tx_aborted_errors;
171 unsigned long tx_carrier_errors;
172 unsigned long tx_fifo_errors;
173 unsigned long tx_heartbeat_errors;
174 unsigned long tx_window_errors;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175 unsigned long rx_compressed;
176 unsigned long tx_compressed;
177};
178
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179
180#include <linux/cache.h>
181#include <linux/skbuff.h>
182
Eric Dumazetadc93002011-11-17 03:13:26 +0000183#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +0100184#include <linux/static_key.h>
185extern struct static_key rps_needed;
Eric Dumazetadc93002011-11-17 03:13:26 +0000186#endif
187
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188struct neighbour;
189struct neigh_parms;
190struct sk_buff;
191
Jiri Pirkof001fde2009-05-05 02:48:28 +0000192struct netdev_hw_addr {
193 struct list_head list;
194 unsigned char addr[MAX_ADDR_LEN];
195 unsigned char type;
Jiri Pirkoccffad252009-05-22 23:22:17 +0000196#define NETDEV_HW_ADDR_T_LAN 1
197#define NETDEV_HW_ADDR_T_SAN 2
198#define NETDEV_HW_ADDR_T_SLAVE 3
199#define NETDEV_HW_ADDR_T_UNICAST 4
Jiri Pirko22bedad32010-04-01 21:22:57 +0000200#define NETDEV_HW_ADDR_T_MULTICAST 5
Jiri Pirko22bedad32010-04-01 21:22:57 +0000201 bool global_use;
Vlad Yasevich4cd729b02013-04-15 09:54:25 +0000202 int sync_cnt;
Eric Dumazet8f8f1032010-09-19 11:24:02 -0700203 int refcount;
Vlad Yasevich4543fbe2013-04-02 17:10:07 -0400204 int synced;
Jiri Pirkof001fde2009-05-05 02:48:28 +0000205 struct rcu_head rcu_head;
206};
207
Jiri Pirko31278e72009-06-17 01:12:19 +0000208struct netdev_hw_addr_list {
209 struct list_head list;
210 int count;
211};
212
Jiri Pirko22bedad32010-04-01 21:22:57 +0000213#define netdev_hw_addr_list_count(l) ((l)->count)
214#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
215#define netdev_hw_addr_list_for_each(ha, l) \
216 list_for_each_entry(ha, &(l)->list, list)
217
218#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
219#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -0800220#define netdev_for_each_uc_addr(ha, dev) \
Jiri Pirko22bedad32010-04-01 21:22:57 +0000221 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -0800222
Jiri Pirko22bedad32010-04-01 21:22:57 +0000223#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
224#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
Pavel Roskin18e225f2010-04-07 16:40:09 -0700225#define netdev_for_each_mc_addr(ha, dev) \
Jiri Pirko22bedad32010-04-01 21:22:57 +0000226 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
Jiri Pirko6683ece2010-02-04 10:22:25 -0800227
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800228struct hh_cache {
David S. Millerf6b72b62011-07-14 07:53:20 -0700229 u16 hh_len;
David S. Miller5c25f682011-07-13 00:51:10 -0700230 u16 __pad;
Stephen Hemminger3644f0c2006-12-07 15:08:17 -0800231 seqlock_t hh_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
233 /* cached hardware header; allow for machine alignment needs. */
234#define HH_DATA_MOD 16
235#define HH_DATA_OFF(__len) \
Jiri Benc5ba0eac2005-06-02 16:48:05 -0700236 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237#define HH_DATA_ALIGN(__len) \
238 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
239 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
240};
241
242/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
243 * Alternative is:
244 * dev->hard_header_len ? (dev->hard_header_len +
245 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
246 *
247 * We could use other alignment values, but we must maintain the
248 * relationship HH alignment <= LL alignment.
249 */
250#define LL_RESERVED_SPACE(dev) \
Johannes Bergf5184d22008-05-12 20:48:31 -0700251 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
Johannes Bergf5184d22008-05-12 20:48:31 -0700253 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700255struct header_ops {
256 int (*create) (struct sk_buff *skb, struct net_device *dev,
257 unsigned short type, const void *daddr,
Eric Dumazet95c96172012-04-15 05:58:06 +0000258 const void *saddr, unsigned int len);
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700259 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
260 int (*rebuild)(struct sk_buff *skb);
David S. Millere69dd332011-07-12 23:28:12 -0700261 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700262 void (*cache_update)(struct hh_cache *hh,
263 const struct net_device *dev,
264 const unsigned char *haddr);
265};
266
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267/* These flag bits are private to the generic network queueing
268 * layer, they may not be explicitly referenced by any other
269 * code.
270 */
271
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800272enum netdev_state_t {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273 __LINK_STATE_START,
274 __LINK_STATE_PRESENT,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 __LINK_STATE_NOCARRIER,
Stefan Rompfb00055a2006-03-20 17:09:11 -0800276 __LINK_STATE_LINKWATCH_PENDING,
277 __LINK_STATE_DORMANT,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278};
279
280
281/*
282 * This structure holds at boot time configured netdevice settings. They
Graf Yangfe2918b2009-02-05 21:26:19 -0800283 * are then used in the device probing.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 */
285struct netdev_boot_setup {
286 char name[IFNAMSIZ];
287 struct ifmap map;
288};
289#define NETDEV_BOOT_SETUP_MAX 8
290
Joe Perchesf629d202013-09-26 14:48:15 -0700291int __init netdev_boot_setup(char *str);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292
293/*
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700294 * Structure for NAPI scheduling similar to tasklet but with weighting
295 */
296struct napi_struct {
297 /* The poll_list must only be managed by the entity which
298 * changes the state of the NAPI_STATE_SCHED bit. This means
299 * whoever atomically sets that bit can add this napi_struct
300 * to the per-cpu poll_list, and whoever clears that bit
301 * can remove from the list right before clearing the bit.
302 */
303 struct list_head poll_list;
304
305 unsigned long state;
306 int weight;
Eric Dumazet404f7c92012-09-26 07:07:47 +0000307 unsigned int gro_count;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700308 int (*poll)(struct napi_struct *, int);
309#ifdef CONFIG_NETPOLL
310 spinlock_t poll_lock;
311 int poll_owner;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700312#endif
Herbert Xu5d38a072009-01-04 16:13:40 -0800313 struct net_device *dev;
Herbert Xud565b0a2008-12-15 23:38:52 -0800314 struct sk_buff *gro_list;
Herbert Xu5d38a072009-01-04 16:13:40 -0800315 struct sk_buff *skb;
Eric Dumazet404f7c92012-09-26 07:07:47 +0000316 struct list_head dev_list;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300317 struct hlist_node napi_hash_node;
318 unsigned int napi_id;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700319};
320
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800321enum {
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700322 NAPI_STATE_SCHED, /* Poll is scheduled */
David S. Millera0a46192008-01-07 20:35:07 -0800323 NAPI_STATE_DISABLE, /* Disable pending */
Neil Horman7b363e42008-12-09 23:22:26 -0800324 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300325 NAPI_STATE_HASHED, /* In NAPI hash */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700326};
327
Ben Hutchings5b252f02009-10-29 07:17:09 +0000328enum gro_result {
Herbert Xud1c76af2009-03-16 10:50:02 -0700329 GRO_MERGED,
330 GRO_MERGED_FREE,
331 GRO_HELD,
332 GRO_NORMAL,
333 GRO_DROP,
334};
Ben Hutchings5b252f02009-10-29 07:17:09 +0000335typedef enum gro_result gro_result_t;
Herbert Xud1c76af2009-03-16 10:50:02 -0700336
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000337/*
338 * enum rx_handler_result - Possible return values for rx_handlers.
339 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
340 * further.
341 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
342 * case skb->dev was changed by rx_handler.
343 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
344 * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
345 *
346 * rx_handlers are functions called from inside __netif_receive_skb(), to do
347 * special processing of the skb, prior to delivery to protocol handlers.
348 *
349 * Currently, a net_device can only have a single rx_handler registered. Trying
350 * to register a second rx_handler will return -EBUSY.
351 *
352 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
353 * To unregister a rx_handler on a net_device, use
354 * netdev_rx_handler_unregister().
355 *
356 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
357 * do with the skb.
358 *
359 * If the rx_handler consumed to skb in some way, it should return
360 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
361 * the skb to be delivered in some other ways.
362 *
363 * If the rx_handler changed skb->dev, to divert the skb to another
364 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
365 * new device will be called if it exists.
366 *
367 * If the rx_handler consider the skb should be ignored, it should return
368 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
Adam Buchbinderd93cf062012-09-19 21:47:58 -0400369 * are registered on exact device (ptype->dev == skb->dev).
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000370 *
371 * If the rx_handler didn't changed skb->dev, but want the skb to be normally
372 * delivered, it should return RX_HANDLER_PASS.
373 *
374 * A device without a registered rx_handler will behave as if rx_handler
375 * returned RX_HANDLER_PASS.
376 */
377
378enum rx_handler_result {
379 RX_HANDLER_CONSUMED,
380 RX_HANDLER_ANOTHER,
381 RX_HANDLER_EXACT,
382 RX_HANDLER_PASS,
383};
384typedef enum rx_handler_result rx_handler_result_t;
385typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +0000386
Joe Perchesf629d202013-09-26 14:48:15 -0700387void __napi_schedule(struct napi_struct *n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700388
David S. Miller4d295152012-03-07 21:02:35 -0500389static inline bool napi_disable_pending(struct napi_struct *n)
David S. Millera0a46192008-01-07 20:35:07 -0800390{
391 return test_bit(NAPI_STATE_DISABLE, &n->state);
392}
393
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700394/**
395 * napi_schedule_prep - check if napi can be scheduled
396 * @n: napi context
397 *
398 * Test if NAPI routine is already running, and if not mark
399 * it as running. This is used as a condition variable
David S. Millera0a46192008-01-07 20:35:07 -0800400 * insure only one NAPI poll instance runs. We also make
401 * sure there is no pending NAPI disable.
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700402 */
David S. Miller4d295152012-03-07 21:02:35 -0500403static inline bool napi_schedule_prep(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700404{
David S. Millera0a46192008-01-07 20:35:07 -0800405 return !napi_disable_pending(n) &&
406 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700407}
408
409/**
410 * napi_schedule - schedule NAPI poll
411 * @n: napi context
412 *
413 * Schedule NAPI poll routine to be called if it is not already
414 * running.
415 */
416static inline void napi_schedule(struct napi_struct *n)
417{
418 if (napi_schedule_prep(n))
419 __napi_schedule(n);
420}
421
Roland Dreierbfe13f52007-10-09 15:47:37 -0700422/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
David S. Miller4d295152012-03-07 21:02:35 -0500423static inline bool napi_reschedule(struct napi_struct *napi)
Roland Dreierbfe13f52007-10-09 15:47:37 -0700424{
425 if (napi_schedule_prep(napi)) {
426 __napi_schedule(napi);
David S. Miller4d295152012-03-07 21:02:35 -0500427 return true;
Roland Dreierbfe13f52007-10-09 15:47:37 -0700428 }
David S. Miller4d295152012-03-07 21:02:35 -0500429 return false;
Roland Dreierbfe13f52007-10-09 15:47:37 -0700430}
431
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700432/**
433 * napi_complete - NAPI processing complete
434 * @n: napi context
435 *
436 * Mark NAPI processing as complete.
437 */
Joe Perchesf629d202013-09-26 14:48:15 -0700438void __napi_complete(struct napi_struct *n);
439void napi_complete(struct napi_struct *n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700440
441/**
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300442 * napi_by_id - lookup a NAPI by napi_id
443 * @napi_id: hashed napi_id
444 *
445 * lookup @napi_id in napi_hash table
446 * must be called under rcu_read_lock()
447 */
Joe Perchesf629d202013-09-26 14:48:15 -0700448struct napi_struct *napi_by_id(unsigned int napi_id);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300449
450/**
451 * napi_hash_add - add a NAPI to global hashtable
452 * @napi: napi context
453 *
454 * generate a new napi_id and store a @napi under it in napi_hash
455 */
Joe Perchesf629d202013-09-26 14:48:15 -0700456void napi_hash_add(struct napi_struct *napi);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300457
458/**
459 * napi_hash_del - remove a NAPI from global table
460 * @napi: napi context
461 *
462 * Warning: caller must observe rcu grace period
463 * before freeing memory containing @napi
464 */
Joe Perchesf629d202013-09-26 14:48:15 -0700465void napi_hash_del(struct napi_struct *napi);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300466
467/**
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700468 * napi_disable - prevent NAPI from scheduling
469 * @n: napi context
470 *
471 * Stop NAPI from being scheduled on this context.
472 * Waits till any outstanding processing completes.
473 */
474static inline void napi_disable(struct napi_struct *n)
475{
Jacob Keller80c33dd2013-09-21 05:05:39 +0000476 might_sleep();
David S. Millera0a46192008-01-07 20:35:07 -0800477 set_bit(NAPI_STATE_DISABLE, &n->state);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700478 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
Benjamin Herrenschmidt43cc7382007-10-26 04:23:22 -0700479 msleep(1);
David S. Millera0a46192008-01-07 20:35:07 -0800480 clear_bit(NAPI_STATE_DISABLE, &n->state);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700481}
482
483/**
484 * napi_enable - enable NAPI scheduling
485 * @n: napi context
486 *
487 * Resume NAPI from being scheduled on this context.
488 * Must be paired with napi_disable.
489 */
490static inline void napi_enable(struct napi_struct *n)
491{
492 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100493 smp_mb__before_atomic();
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700494 clear_bit(NAPI_STATE_SCHED, &n->state);
495}
496
Stephen Hemmingerc264c3d2007-10-17 13:26:41 -0700497#ifdef CONFIG_SMP
498/**
499 * napi_synchronize - wait until NAPI is not running
500 * @n: napi context
501 *
502 * Wait until NAPI is done being scheduled on this context.
503 * Waits till any outstanding processing completes but
504 * does not disable future activations.
505 */
506static inline void napi_synchronize(const struct napi_struct *n)
507{
508 while (test_bit(NAPI_STATE_SCHED, &n->state))
509 msleep(1);
510}
511#else
512# define napi_synchronize(n) barrier()
513#endif
514
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800515enum netdev_queue_state_t {
Tom Herbert734664982011-11-28 16:32:44 +0000516 __QUEUE_STATE_DRV_XOFF,
517 __QUEUE_STATE_STACK_XOFF,
David S. Millerc3f26a22008-07-31 16:58:50 -0700518 __QUEUE_STATE_FROZEN,
David S. Miller79d16382008-07-08 23:14:46 -0700519};
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +0200520
521#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
522#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
523#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
524
525#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
526#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
527 QUEUE_STATE_FROZEN)
528#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
529 QUEUE_STATE_FROZEN)
530
Tom Herbert734664982011-11-28 16:32:44 +0000531/*
532 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
533 * netif_tx_* functions below are used to manipulate this flag. The
534 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
535 * queue independently. The netif_xmit_*stopped functions below are called
536 * to check if the queue has been stopped by the driver or stack (either
537 * of the XOFF bits are set in the state). Drivers should not need to call
538 * netif_xmit*stopped functions, they should only be using netif_tx_*.
539 */
David S. Miller79d16382008-07-08 23:14:46 -0700540
David S. Millerbb949fb2008-07-08 16:55:56 -0700541struct netdev_queue {
Eric Dumazet6a321cb2009-04-28 04:43:42 -0700542/*
543 * read mostly part
544 */
David S. Millerbb949fb2008-07-08 16:55:56 -0700545 struct net_device *dev;
David S. Millerb0e1e642008-07-08 17:42:10 -0700546 struct Qdisc *qdisc;
547 struct Qdisc *qdisc_sleeping;
david decotignyccf5ff62011-11-16 12:15:10 +0000548#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +0000549 struct kobject kobj;
550#endif
Eric Dumazetf2cd2d32010-11-29 08:14:37 +0000551#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
552 int numa_node;
553#endif
Eric Dumazet6a321cb2009-04-28 04:43:42 -0700554/*
555 * write mostly part
556 */
557 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
558 int xmit_lock_owner;
Eric Dumazet9d214932009-05-17 20:55:16 -0700559 /*
560 * please use this field instead of dev->trans_start
561 */
562 unsigned long trans_start;
david decotignyccf5ff62011-11-16 12:15:10 +0000563
564 /*
565 * Number of TX timeouts for this queue
566 * (/sys/class/net/DEV/Q/trans_timeout)
567 */
568 unsigned long trans_timeout;
Tom Herbert114cf582011-11-28 16:33:09 +0000569
570 unsigned long state;
571
572#ifdef CONFIG_BQL
573 struct dql dql;
574#endif
David S. Millere8a04642008-07-17 00:34:19 -0700575} ____cacheline_aligned_in_smp;
David S. Millerbb949fb2008-07-08 16:55:56 -0700576
Eric Dumazetf2cd2d32010-11-29 08:14:37 +0000577static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
578{
579#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
580 return q->numa_node;
581#else
Changli Gaob236da62010-12-14 03:09:15 +0000582 return NUMA_NO_NODE;
Eric Dumazetf2cd2d32010-11-29 08:14:37 +0000583#endif
584}
585
586static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
587{
588#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
589 q->numa_node = node;
590#endif
591}
592
Eric Dumazetdf334542010-03-24 19:13:54 +0000593#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +0000594/*
595 * This structure holds an RPS map which can be of variable length. The
596 * map is an array of CPUs.
597 */
598struct rps_map {
599 unsigned int len;
600 struct rcu_head rcu;
601 u16 cpus[0];
602};
Eric Dumazet60b778c2011-12-24 06:56:49 +0000603#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
Tom Herbert0a9627f2010-03-16 08:03:29 +0000604
Tom Herbertfec5e652010-04-16 16:01:27 -0700605/*
Ben Hutchingsc4454772011-01-19 11:03:53 +0000606 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
607 * tail pointer for that CPU's input queue at the time of last enqueue, and
608 * a hardware filter index.
Tom Herbertfec5e652010-04-16 16:01:27 -0700609 */
610struct rps_dev_flow {
611 u16 cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +0000612 u16 filter;
Tom Herbertfec5e652010-04-16 16:01:27 -0700613 unsigned int last_qtail;
614};
Ben Hutchingsc4454772011-01-19 11:03:53 +0000615#define RPS_NO_FILTER 0xffff
Tom Herbertfec5e652010-04-16 16:01:27 -0700616
617/*
618 * The rps_dev_flow_table structure contains a table of flow mappings.
619 */
620struct rps_dev_flow_table {
621 unsigned int mask;
622 struct rcu_head rcu;
Tom Herbertfec5e652010-04-16 16:01:27 -0700623 struct rps_dev_flow flows[0];
624};
625#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
Eric Dumazet60b778c2011-12-24 06:56:49 +0000626 ((_num) * sizeof(struct rps_dev_flow)))
Tom Herbertfec5e652010-04-16 16:01:27 -0700627
628/*
629 * The rps_sock_flow_table contains mappings of flows to the last CPU
630 * on which they were processed by the application (set in recvmsg).
631 */
632struct rps_sock_flow_table {
633 unsigned int mask;
634 u16 ents[0];
635};
636#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
Eric Dumazet60b778c2011-12-24 06:56:49 +0000637 ((_num) * sizeof(u16)))
Tom Herbertfec5e652010-04-16 16:01:27 -0700638
639#define RPS_NO_CPU 0xffff
640
641static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
642 u32 hash)
643{
644 if (table && hash) {
645 unsigned int cpu, index = hash & table->mask;
646
647 /* We only give a hint, preemption can change cpu under us */
648 cpu = raw_smp_processor_id();
649
650 if (table->ents[index] != cpu)
651 table->ents[index] = cpu;
652 }
653}
654
655static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
656 u32 hash)
657{
658 if (table && hash)
659 table->ents[hash & table->mask] = RPS_NO_CPU;
660}
661
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000662extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
Tom Herbertfec5e652010-04-16 16:01:27 -0700663
Ben Hutchingsc4454772011-01-19 11:03:53 +0000664#ifdef CONFIG_RFS_ACCEL
Joe Perchesf629d202013-09-26 14:48:15 -0700665bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
666 u16 filter_id);
Ben Hutchingsc4454772011-01-19 11:03:53 +0000667#endif
Michael Daltona953be52014-01-16 22:23:28 -0800668#endif /* CONFIG_RPS */
Ben Hutchingsc4454772011-01-19 11:03:53 +0000669
Tom Herbert0a9627f2010-03-16 08:03:29 +0000670/* This structure contains an instance of an RX queue. */
671struct netdev_rx_queue {
Michael Daltona953be52014-01-16 22:23:28 -0800672#ifdef CONFIG_RPS
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000673 struct rps_map __rcu *rps_map;
674 struct rps_dev_flow_table __rcu *rps_flow_table;
Michael Daltona953be52014-01-16 22:23:28 -0800675#endif
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000676 struct kobject kobj;
Tom Herbertfe822242010-11-09 10:47:38 +0000677 struct net_device *dev;
Tom Herbert0a9627f2010-03-16 08:03:29 +0000678} ____cacheline_aligned_in_smp;
Michael Daltona953be52014-01-16 22:23:28 -0800679
680/*
681 * RX queue sysfs structures and functions.
682 */
683struct rx_queue_attribute {
684 struct attribute attr;
685 ssize_t (*show)(struct netdev_rx_queue *queue,
686 struct rx_queue_attribute *attr, char *buf);
687 ssize_t (*store)(struct netdev_rx_queue *queue,
688 struct rx_queue_attribute *attr, const char *buf, size_t len);
689};
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800690
Tom Herbertbf264142010-11-26 08:36:09 +0000691#ifdef CONFIG_XPS
692/*
693 * This structure holds an XPS map which can be of variable length. The
694 * map is an array of queues.
695 */
696struct xps_map {
697 unsigned int len;
698 unsigned int alloc_len;
699 struct rcu_head rcu;
700 u16 queues[0];
701};
Eric Dumazet60b778c2011-12-24 06:56:49 +0000702#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
Tom Herbertbf264142010-11-26 08:36:09 +0000703#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
704 / sizeof(u16))
705
706/*
707 * This structure holds all XPS maps for device. Maps are indexed by CPU.
708 */
709struct xps_dev_maps {
710 struct rcu_head rcu;
Eric Dumazeta4177862010-11-28 21:43:02 +0000711 struct xps_map __rcu *cpu_map[0];
Tom Herbertbf264142010-11-26 08:36:09 +0000712};
713#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
714 (nr_cpu_ids * sizeof(struct xps_map *)))
715#endif /* CONFIG_XPS */
716
John Fastabend4f57c082011-01-17 08:06:04 +0000717#define TC_MAX_QUEUE 16
718#define TC_BITMASK 15
719/* HW offloaded queuing disciplines txq count and offset maps */
720struct netdev_tc_txq {
721 u16 count;
722 u16 offset;
723};
724
Neerav Parikh68bad942012-01-04 20:23:39 +0000725#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
726/*
727 * This structure is to hold information about the device
728 * configured to run FCoE protocol stack.
729 */
730struct netdev_fcoe_hbainfo {
731 char manufacturer[64];
732 char serial_number[64];
733 char hardware_version[64];
734 char driver_version[64];
735 char optionrom_version[64];
736 char firmware_version[64];
737 char model[256];
738 char model_description[256];
739};
740#endif
741
Jiri Pirko66b52b02013-07-29 18:16:49 +0200742#define MAX_PHYS_PORT_ID_LEN 32
743
744/* This structure holds a unique identifier to identify the
745 * physical port used by a netdevice.
746 */
747struct netdev_phys_port_id {
748 unsigned char id[MAX_PHYS_PORT_ID_LEN];
749 unsigned char id_len;
750};
751
Daniel Borkmann99932d42014-02-16 15:55:20 +0100752typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
753 struct sk_buff *skb);
754
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800755/*
756 * This structure defines the management hooks for network devices.
Stephen Hemminger00829822008-11-20 20:14:53 -0800757 * The following hooks can be defined; unless noted otherwise, they are
758 * optional and can be filled with a null pointer.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800759 *
760 * int (*ndo_init)(struct net_device *dev);
761 * This function is called once when network device is registered.
762 * The network device can use this to any late stage initializaton
763 * or semantic validattion. It can fail with an error code which will
764 * be propogated back to register_netdev
765 *
766 * void (*ndo_uninit)(struct net_device *dev);
767 * This function is called when device is unregistered or when registration
768 * fails. It is not called if init fails.
769 *
770 * int (*ndo_open)(struct net_device *dev);
771 * This function is called when network device transistions to the up
772 * state.
773 *
774 * int (*ndo_stop)(struct net_device *dev);
775 * This function is called when network device transistions to the down
776 * state.
777 *
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000778 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
779 * struct net_device *dev);
Stephen Hemminger00829822008-11-20 20:14:53 -0800780 * Called when a packet needs to be transmitted.
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000781 * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
782 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
Stephen Hemminger00829822008-11-20 20:14:53 -0800783 * Required can not be NULL.
784 *
Jason Wangf663dd92014-01-10 16:18:26 +0800785 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +0100786 * void *accel_priv, select_queue_fallback_t fallback);
Stephen Hemminger00829822008-11-20 20:14:53 -0800787 * Called to decide which queue to when device supports multiple
788 * transmit queues.
789 *
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800790 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
791 * This function is called to allow device receiver to make
792 * changes to configuration when multicast or promiscious is enabled.
793 *
794 * void (*ndo_set_rx_mode)(struct net_device *dev);
795 * This function is called device changes address list filtering.
Jiri Pirko01789342011-08-16 06:29:00 +0000796 * If driver handles unicast address filtering, it should set
797 * IFF_UNICAST_FLT to its priv_flags.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800798 *
799 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
800 * This function is called when the Media Access Control address
Mike Rapoport37b607c2009-04-27 05:45:54 -0700801 * needs to be changed. If this interface is not defined, the
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800802 * mac address can not be changed.
803 *
804 * int (*ndo_validate_addr)(struct net_device *dev);
805 * Test if Media Access Control address is valid for the device.
806 *
807 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
808 * Called when a user request an ioctl which can't be handled by
809 * the generic interface code. If not defined ioctl's return
810 * not supported error code.
811 *
812 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
813 * Used to set network devices bus interface parameters. This interface
814 * is retained for legacy reason, new devices should use the bus
815 * interface (PCI) for low level management.
816 *
817 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
818 * Called when a user wants to change the Maximum Transfer Unit
819 * of a device. If not defined, any request to change MTU will
820 * will return an error.
821 *
Stephen Hemminger00829822008-11-20 20:14:53 -0800822 * void (*ndo_tx_timeout)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800823 * Callback uses when the transmitter has not made any progress
824 * for dev->watchdog ticks.
825 *
Ben Hutchings3cfde792010-07-09 09:11:52 +0000826 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
Eric Dumazet28172732010-07-07 14:58:56 -0700827 * struct rtnl_link_stats64 *storage);
Wolfram Sangd308e382009-10-07 13:53:11 -0700828 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800829 * Called when a user wants to get the network device usage
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +0000830 * statistics. Drivers must do one of the following:
Ben Hutchings3cfde792010-07-09 09:11:52 +0000831 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
832 * rtnl_link_stats64 structure passed by the caller.
Ben Hutchings82695d92010-06-15 15:08:48 -0700833 * 2. Define @ndo_get_stats to update a net_device_stats structure
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +0000834 * (which should normally be dev->stats) and return a pointer to
835 * it. The structure may be changed asynchronously only if each
836 * field is written atomically.
837 * 3. Update dev->stats asynchronously and atomically, and define
838 * neither operation.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800839 *
Patrick McHardy80d5c362013-04-19 02:04:28 +0000840 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
841 * If device support VLAN filtering this function is called when a
842 * VLAN id is registered.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800843 *
Jiri Pirko8e586132011-12-08 19:52:37 -0500844 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
Patrick McHardy80d5c362013-04-19 02:04:28 +0000845 * If device support VLAN filtering this function is called when a
846 * VLAN id is unregistered.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800847 *
848 * void (*ndo_poll_controller)(struct net_device *dev);
Williams, Mitch A95c26df2010-02-10 01:43:46 +0000849 *
850 * SR-IOV management functions.
851 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
852 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -0400853 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
854 * int max_tx_rate);
Greg Rose5f8444a2011-10-08 03:05:24 +0000855 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
Williams, Mitch A95c26df2010-02-10 01:43:46 +0000856 * int (*ndo_get_vf_config)(struct net_device *dev,
857 * int vf, struct ifla_vf_info *ivf);
Rony Efraim1d8faf42013-06-13 13:19:10 +0300858 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
Scott Feldman57b61082010-05-17 22:49:55 -0700859 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
860 * struct nlattr *port[]);
861 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
John Fastabend4f57c082011-01-17 08:06:04 +0000862 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
863 * Called to setup 'tc' number of traffic classes in the net device. This
864 * is always called from the stack with the rtnl lock held and netif tx
865 * queues stopped. This allows the netdevice to perform queue management
866 * safely.
Ben Hutchingsc4454772011-01-19 11:03:53 +0000867 *
Yi Zoue9bce842011-03-09 08:48:03 +0000868 * Fiber Channel over Ethernet (FCoE) offload functions.
869 * int (*ndo_fcoe_enable)(struct net_device *dev);
870 * Called when the FCoE protocol stack wants to start using LLD for FCoE
871 * so the underlying device can perform whatever needed configuration or
872 * initialization to support acceleration of FCoE traffic.
873 *
874 * int (*ndo_fcoe_disable)(struct net_device *dev);
875 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
876 * so the underlying device can perform whatever needed clean-ups to
877 * stop supporting acceleration of FCoE traffic.
878 *
879 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
880 * struct scatterlist *sgl, unsigned int sgc);
881 * Called when the FCoE Initiator wants to initialize an I/O that
882 * is a possible candidate for Direct Data Placement (DDP). The LLD can
883 * perform necessary setup and returns 1 to indicate the device is set up
884 * successfully to perform DDP on this I/O, otherwise this returns 0.
885 *
886 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
887 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
888 * indicated by the FC exchange id 'xid', so the underlying device can
889 * clean up and reuse resources for later DDP requests.
890 *
891 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
892 * struct scatterlist *sgl, unsigned int sgc);
893 * Called when the FCoE Target wants to initialize an I/O that
894 * is a possible candidate for Direct Data Placement (DDP). The LLD can
895 * perform necessary setup and returns 1 to indicate the device is set up
896 * successfully to perform DDP on this I/O, otherwise this returns 0.
897 *
Neerav Parikh68bad942012-01-04 20:23:39 +0000898 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
899 * struct netdev_fcoe_hbainfo *hbainfo);
900 * Called when the FCoE Protocol stack wants information on the underlying
901 * device. This information is utilized by the FCoE protocol stack to
902 * register attributes with Fiber Channel management service as per the
903 * FC-GS Fabric Device Management Information(FDMI) specification.
904 *
Yi Zoue9bce842011-03-09 08:48:03 +0000905 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
906 * Called when the underlying device wants to override default World Wide
907 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
908 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
909 * protocol stack to use.
910 *
Ben Hutchingsc4454772011-01-19 11:03:53 +0000911 * RFS acceleration.
912 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
913 * u16 rxq_index, u32 flow_id);
914 * Set hardware filter for RFS. rxq_index is the target queue index;
915 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
916 * Return the filter ID on success, or a negative error code.
Jiri Pirkofbaec0e2011-02-13 10:15:37 +0000917 *
Jiri Pirko8b98a702013-01-03 22:49:02 +0000918 * Slave management functions (for bridge, bonding, etc).
Jiri Pirkofbaec0e2011-02-13 10:15:37 +0000919 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
920 * Called to make another netdev an underling.
921 *
922 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
923 * Called to release previously enslaved netdev.
Michał Mirosław5455c692011-02-15 16:59:17 +0000924 *
925 * Feature/offload setting functions.
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000926 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
927 * netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +0000928 * Adjusts the requested feature flags according to device-specific
929 * constraints, and returns the resulting flags. Must not modify
930 * the device state.
931 *
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000932 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +0000933 * Called to update device configuration to new features. Passed
934 * feature set might be less than what was returned by ndo_fix_features()).
935 * Must return >0 or -errno if it changed dev->features itself.
936 *
stephen hemmingeredc7d572012-10-01 12:32:33 +0000937 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
938 * struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +0000939 * const unsigned char *addr, u16 flags)
John Fastabend77162022012-04-15 06:43:56 +0000940 * Adds an FDB entry to dev for addr.
Vlad Yasevich1690be62013-02-13 12:00:18 +0000941 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
942 * struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +0000943 * const unsigned char *addr)
John Fastabend77162022012-04-15 06:43:56 +0000944 * Deletes the FDB entry from dev coresponding to addr.
945 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
Jamal Hadi Salim5d5eacb2014-07-10 07:01:58 -0400946 * struct net_device *dev, struct net_device *filter_dev,
947 * int idx)
John Fastabend77162022012-04-15 06:43:56 +0000948 * Used to add FDB entries to dump requests. Implementers should add
949 * entries to skb and update idx with the number of entries.
John Fastabende5a55a82012-10-24 08:12:57 +0000950 *
951 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
952 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
Dmitry Kravkov24f11a52013-03-27 06:54:00 +0000953 * struct net_device *dev, u32 filter_mask)
Jiri Pirko4bf84c32012-12-27 23:49:37 +0000954 *
955 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
956 * Called to change device carrier. Soft-devices (like dummy, team, etc)
957 * which do not represent real hardware may define this to allow their
958 * userspace components to manage their virtual carrier state. Devices
959 * that determine carrier state from physical hardware properties (eg
960 * network cables) or protocol-dependent mechanisms (eg
961 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
Jiri Pirko66b52b02013-07-29 18:16:49 +0200962 *
963 * int (*ndo_get_phys_port_id)(struct net_device *dev,
964 * struct netdev_phys_port_id *ppid);
965 * Called to get ID of physical port of this device. If driver does
966 * not implement this, it is assumed that the hw is not able to have
967 * multiple net devices on single physical port.
Joseph Gasparakis53cf52752013-09-04 02:13:38 -0700968 *
969 * void (*ndo_add_vxlan_port)(struct net_device *dev,
Joseph Gasparakis35e42372013-09-13 07:34:13 -0700970 * sa_family_t sa_family, __be16 port);
Joseph Gasparakis53cf52752013-09-04 02:13:38 -0700971 * Called by vxlan to notiy a driver about the UDP port and socket
972 * address family that vxlan is listnening to. It is called only when
973 * a new port starts listening. The operation is protected by the
974 * vxlan_net->sock_lock.
975 *
976 * void (*ndo_del_vxlan_port)(struct net_device *dev,
Joseph Gasparakis35e42372013-09-13 07:34:13 -0700977 * sa_family_t sa_family, __be16 port);
Joseph Gasparakis53cf52752013-09-04 02:13:38 -0700978 * Called by vxlan to notify the driver about a UDP port and socket
979 * address family that vxlan is not listening to anymore. The operation
980 * is protected by the vxlan_net->sock_lock.
John Fastabenda6cc0cf2013-11-06 09:54:46 -0800981 *
982 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
983 * struct net_device *dev)
984 * Called by upper layer devices to accelerate switching or other
985 * station functionality into hardware. 'pdev is the lowerdev
986 * to use for the offload and 'dev' is the net device that will
987 * back the offload. Returns a pointer to the private structure
988 * the upper layer will maintain.
989 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
990 * Called by upper layer device to delete the station created
991 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
992 * the station and priv is the structure returned by the add
993 * operation.
994 * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
995 * struct net_device *dev,
996 * void *priv);
997 * Callback to use for xmit over the accelerated station. This
998 * is used in place of ndo_start_xmit on accelerated net
999 * devices.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001000 */
1001struct net_device_ops {
1002 int (*ndo_init)(struct net_device *dev);
1003 void (*ndo_uninit)(struct net_device *dev);
1004 int (*ndo_open)(struct net_device *dev);
1005 int (*ndo_stop)(struct net_device *dev);
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +00001006 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
Stephen Hemminger00829822008-11-20 20:14:53 -08001007 struct net_device *dev);
1008 u16 (*ndo_select_queue)(struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08001009 struct sk_buff *skb,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001010 void *accel_priv,
1011 select_queue_fallback_t fallback);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001012 void (*ndo_change_rx_flags)(struct net_device *dev,
1013 int flags);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001014 void (*ndo_set_rx_mode)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001015 int (*ndo_set_mac_address)(struct net_device *dev,
1016 void *addr);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001017 int (*ndo_validate_addr)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001018 int (*ndo_do_ioctl)(struct net_device *dev,
1019 struct ifreq *ifr, int cmd);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001020 int (*ndo_set_config)(struct net_device *dev,
1021 struct ifmap *map);
Stephen Hemminger00829822008-11-20 20:14:53 -08001022 int (*ndo_change_mtu)(struct net_device *dev,
1023 int new_mtu);
1024 int (*ndo_neigh_setup)(struct net_device *dev,
1025 struct neigh_parms *);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001026 void (*ndo_tx_timeout) (struct net_device *dev);
1027
Eric Dumazet28172732010-07-07 14:58:56 -07001028 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1029 struct rtnl_link_stats64 *storage);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001030 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1031
Jiri Pirko8e586132011-12-08 19:52:37 -05001032 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00001033 __be16 proto, u16 vid);
Jiri Pirko8e586132011-12-08 19:52:37 -05001034 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00001035 __be16 proto, u16 vid);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001036#ifdef CONFIG_NET_POLL_CONTROLLER
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001037 void (*ndo_poll_controller)(struct net_device *dev);
Herbert Xu4247e162010-06-10 16:12:47 +00001038 int (*ndo_netpoll_setup)(struct net_device *dev,
Eric W. Biedermana8779ec2014-03-27 15:36:38 -07001039 struct netpoll_info *info);
WANG Cong0e34e932010-05-06 00:47:21 -07001040 void (*ndo_netpoll_cleanup)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001041#endif
Cong Wange0d10952013-08-01 11:10:25 +08001042#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir8b80cda2013-07-10 17:13:26 +03001043 int (*ndo_busy_poll)(struct napi_struct *dev);
Eliezer Tamir06021292013-06-10 11:39:50 +03001044#endif
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001045 int (*ndo_set_vf_mac)(struct net_device *dev,
1046 int queue, u8 *mac);
1047 int (*ndo_set_vf_vlan)(struct net_device *dev,
1048 int queue, u16 vlan, u8 qos);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001049 int (*ndo_set_vf_rate)(struct net_device *dev,
1050 int vf, int min_tx_rate,
1051 int max_tx_rate);
Greg Rose5f8444a2011-10-08 03:05:24 +00001052 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1053 int vf, bool setting);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001054 int (*ndo_get_vf_config)(struct net_device *dev,
1055 int vf,
1056 struct ifla_vf_info *ivf);
Rony Efraim1d8faf42013-06-13 13:19:10 +03001057 int (*ndo_set_vf_link_state)(struct net_device *dev,
1058 int vf, int link_state);
Scott Feldman57b61082010-05-17 22:49:55 -07001059 int (*ndo_set_vf_port)(struct net_device *dev,
1060 int vf,
1061 struct nlattr *port[]);
1062 int (*ndo_get_vf_port)(struct net_device *dev,
1063 int vf, struct sk_buff *skb);
John Fastabend4f57c082011-01-17 08:06:04 +00001064 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001065#if IS_ENABLED(CONFIG_FCOE)
Yi Zoucb454392009-08-31 12:31:36 +00001066 int (*ndo_fcoe_enable)(struct net_device *dev);
1067 int (*ndo_fcoe_disable)(struct net_device *dev);
Yi Zou4d288d52009-02-27 14:06:59 -08001068 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1069 u16 xid,
1070 struct scatterlist *sgl,
1071 unsigned int sgc);
1072 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1073 u16 xid);
Yi Zou6247e082011-02-01 07:22:06 +00001074 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1075 u16 xid,
1076 struct scatterlist *sgl,
1077 unsigned int sgc);
Neerav Parikh68bad942012-01-04 20:23:39 +00001078 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1079 struct netdev_fcoe_hbainfo *hbainfo);
Bhanu Prakash Gollapudi3c9c36bc2011-08-26 09:45:41 +00001080#endif
1081
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001082#if IS_ENABLED(CONFIG_LIBFCOE)
Yi Zoudf5c7942009-10-28 18:24:35 +00001083#define NETDEV_FCOE_WWNN 0
1084#define NETDEV_FCOE_WWPN 1
1085 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1086 u64 *wwn, int type);
Yi Zou4d288d52009-02-27 14:06:59 -08001087#endif
Bhanu Prakash Gollapudi3c9c36bc2011-08-26 09:45:41 +00001088
Ben Hutchingsc4454772011-01-19 11:03:53 +00001089#ifdef CONFIG_RFS_ACCEL
1090 int (*ndo_rx_flow_steer)(struct net_device *dev,
1091 const struct sk_buff *skb,
1092 u16 rxq_index,
1093 u32 flow_id);
1094#endif
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001095 int (*ndo_add_slave)(struct net_device *dev,
1096 struct net_device *slave_dev);
1097 int (*ndo_del_slave)(struct net_device *dev,
1098 struct net_device *slave_dev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001099 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1100 netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +00001101 int (*ndo_set_features)(struct net_device *dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001102 netdev_features_t features);
David Millerda6a8fa2011-07-25 00:01:38 +00001103 int (*ndo_neigh_construct)(struct neighbour *n);
David S. Miller447f2192011-12-19 15:04:41 -05001104 void (*ndo_neigh_destroy)(struct neighbour *n);
John Fastabend77162022012-04-15 06:43:56 +00001105
1106 int (*ndo_fdb_add)(struct ndmsg *ndm,
stephen hemmingeredc7d572012-10-01 12:32:33 +00001107 struct nlattr *tb[],
John Fastabend77162022012-04-15 06:43:56 +00001108 struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +00001109 const unsigned char *addr,
John Fastabend77162022012-04-15 06:43:56 +00001110 u16 flags);
1111 int (*ndo_fdb_del)(struct ndmsg *ndm,
Vlad Yasevich1690be62013-02-13 12:00:18 +00001112 struct nlattr *tb[],
John Fastabend77162022012-04-15 06:43:56 +00001113 struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +00001114 const unsigned char *addr);
John Fastabend77162022012-04-15 06:43:56 +00001115 int (*ndo_fdb_dump)(struct sk_buff *skb,
1116 struct netlink_callback *cb,
1117 struct net_device *dev,
Jamal Hadi Salim5d5eacb2014-07-10 07:01:58 -04001118 struct net_device *filter_dev,
John Fastabend77162022012-04-15 06:43:56 +00001119 int idx);
John Fastabende5a55a82012-10-24 08:12:57 +00001120
1121 int (*ndo_bridge_setlink)(struct net_device *dev,
1122 struct nlmsghdr *nlh);
1123 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1124 u32 pid, u32 seq,
Vlad Yasevich6cbdcee2013-02-13 12:00:13 +00001125 struct net_device *dev,
1126 u32 filter_mask);
Vlad Yasevich407af322013-02-13 12:00:12 +00001127 int (*ndo_bridge_dellink)(struct net_device *dev,
1128 struct nlmsghdr *nlh);
Jiri Pirko4bf84c32012-12-27 23:49:37 +00001129 int (*ndo_change_carrier)(struct net_device *dev,
1130 bool new_carrier);
Jiri Pirko66b52b02013-07-29 18:16:49 +02001131 int (*ndo_get_phys_port_id)(struct net_device *dev,
1132 struct netdev_phys_port_id *ppid);
Joseph Gasparakis53cf52752013-09-04 02:13:38 -07001133 void (*ndo_add_vxlan_port)(struct net_device *dev,
1134 sa_family_t sa_family,
Joseph Gasparakis35e42372013-09-13 07:34:13 -07001135 __be16 port);
Joseph Gasparakis53cf52752013-09-04 02:13:38 -07001136 void (*ndo_del_vxlan_port)(struct net_device *dev,
1137 sa_family_t sa_family,
Joseph Gasparakis35e42372013-09-13 07:34:13 -07001138 __be16 port);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08001139
1140 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1141 struct net_device *dev);
1142 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1143 void *priv);
1144
1145 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1146 struct net_device *dev,
1147 void *priv);
Vlad Yasevich25175ba2014-05-16 17:04:54 -04001148 int (*ndo_get_lock_subclass)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001149};
1150
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001151/**
1152 * enum net_device_priv_flags - &struct net_device priv_flags
1153 *
1154 * These are the &struct net_device, they are only set internally
1155 * by drivers and used in the kernel. These flags are invisible to
1156 * userspace, this means that the order of these flags can change
1157 * during any kernel release.
1158 *
1159 * You should have a pretty good reason to be extending these flags.
1160 *
1161 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1162 * @IFF_EBRIDGE: Ethernet bridging device
1163 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
1164 * @IFF_MASTER_8023AD: bonding master, 802.3ad
1165 * @IFF_MASTER_ALB: bonding master, balance-alb
1166 * @IFF_BONDING: bonding master or slave
1167 * @IFF_SLAVE_NEEDARP: need ARPs for validation
1168 * @IFF_ISATAP: ISATAP interface (RFC4214)
1169 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
1170 * @IFF_WAN_HDLC: WAN HDLC device
1171 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1172 * release skb->dst
1173 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1174 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1175 * @IFF_MACVLAN_PORT: device used as macvlan port
1176 * @IFF_BRIDGE_PORT: device used as bridge port
1177 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1178 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1179 * @IFF_UNICAST_FLT: Supports unicast filtering
1180 * @IFF_TEAM_PORT: device used as team port
1181 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1182 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1183 * change when it's running
1184 * @IFF_MACVLAN: Macvlan device
1185 */
1186enum netdev_priv_flags {
1187 IFF_802_1Q_VLAN = 1<<0,
1188 IFF_EBRIDGE = 1<<1,
1189 IFF_SLAVE_INACTIVE = 1<<2,
1190 IFF_MASTER_8023AD = 1<<3,
1191 IFF_MASTER_ALB = 1<<4,
1192 IFF_BONDING = 1<<5,
1193 IFF_SLAVE_NEEDARP = 1<<6,
1194 IFF_ISATAP = 1<<7,
1195 IFF_MASTER_ARPMON = 1<<8,
1196 IFF_WAN_HDLC = 1<<9,
1197 IFF_XMIT_DST_RELEASE = 1<<10,
1198 IFF_DONT_BRIDGE = 1<<11,
1199 IFF_DISABLE_NETPOLL = 1<<12,
1200 IFF_MACVLAN_PORT = 1<<13,
1201 IFF_BRIDGE_PORT = 1<<14,
1202 IFF_OVS_DATAPATH = 1<<15,
1203 IFF_TX_SKB_SHARING = 1<<16,
1204 IFF_UNICAST_FLT = 1<<17,
1205 IFF_TEAM_PORT = 1<<18,
1206 IFF_SUPP_NOFCS = 1<<19,
1207 IFF_LIVE_ADDR_CHANGE = 1<<20,
1208 IFF_MACVLAN = 1<<21,
1209};
1210
1211#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1212#define IFF_EBRIDGE IFF_EBRIDGE
1213#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
1214#define IFF_MASTER_8023AD IFF_MASTER_8023AD
1215#define IFF_MASTER_ALB IFF_MASTER_ALB
1216#define IFF_BONDING IFF_BONDING
1217#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
1218#define IFF_ISATAP IFF_ISATAP
1219#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
1220#define IFF_WAN_HDLC IFF_WAN_HDLC
1221#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1222#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1223#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1224#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1225#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1226#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1227#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1228#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1229#define IFF_TEAM_PORT IFF_TEAM_PORT
1230#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1231#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1232#define IFF_MACVLAN IFF_MACVLAN
1233
Karoly Kemeny536721b2014-07-30 20:27:36 +02001234/**
1235 * struct net_device - The DEVICE structure.
1236 * Actually, this whole structure is a big mistake. It mixes I/O
1237 * data with strictly "high-level" data, and it has to know about
1238 * almost every data structure used in the INET module.
1239 *
1240 * @name: This is the first field of the "visible" part of this structure
1241 * (i.e. as seen by users in the "Space.c" file). It is the name
1242 * of the interface.
1243 *
1244 * @name_hlist: Device name hash chain, please keep it close to name[]
1245 * @ifalias: SNMP alias
1246 * @mem_end: Shared memory end
1247 * @mem_start: Shared memory start
1248 * @base_addr: Device I/O address
1249 * @irq: Device IRQ number
1250 *
1251 * @state: Generic network queuing layer state, see netdev_state_t
1252 * @dev_list: The global list of network devices
1253 * @napi_list: List entry, that is used for polling napi devices
1254 * @unreg_list: List entry, that is used, when we are unregistering the
1255 * device, see the function unregister_netdev
1256 * @close_list: List entry, that is used, when we are closing the device
1257 *
1258 * @adj_list: Directly linked devices, like slaves for bonding
1259 * @all_adj_list: All linked devices, *including* neighbours
1260 * @features: Currently active device features
1261 * @hw_features: User-changeable features
1262 *
1263 * @wanted_features: User-requested features
1264 * @vlan_features: Mask of features inheritable by VLAN devices
1265 *
1266 * @hw_enc_features: Mask of features inherited by encapsulating devices
1267 * This field indicates what encapsulation
1268 * offloads the hardware is capable of doing,
1269 * and drivers will need to set them appropriately.
1270 *
1271 * @mpls_features: Mask of features inheritable by MPLS
1272 *
1273 * @ifindex: interface index
1274 * @iflink: unique device identifier
1275 *
1276 * @stats: Statistics struct, which was left as a legacy, use
1277 * rtnl_link_stats64 instead
1278 *
1279 * @rx_dropped: Dropped packets by core network,
1280 * do not use this in drivers
1281 * @tx_dropped: Dropped packets by core network,
1282 * do not use this in drivers
1283 *
1284 * @carrier_changes: Stats to monitor carrier on<->off transitions
1285 *
1286 * @wireless_handlers: List of functions to handle Wireless Extensions,
1287 * instead of ioctl,
1288 * see <net/iw_handler.h> for details.
1289 * @wireless_data: Instance data managed by the core of wireless extensions
1290 *
1291 * @netdev_ops: Includes several pointers to callbacks,
1292 * if one wants to override the ndo_*() functions
1293 * @ethtool_ops: Management operations
1294 * @fwd_ops: Management operations
1295 * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc
1296 * of Layer 2 headers.
1297 *
1298 * @flags: Interface flags (a la BSD)
1299 * @priv_flags: Like 'flags' but invisible to userspace,
1300 * see if.h for the definitions
1301 * @gflags: Global flags ( kept as legacy )
1302 * @padded: How much padding added by alloc_netdev()
1303 * @operstate: RFC2863 operstate
1304 * @link_mode: Mapping policy to operstate
1305 * @if_port: Selectable AUI, TP, ...
1306 * @dma: DMA channel
1307 * @mtu: Interface MTU value
1308 * @type: Interface hardware type
1309 * @hard_header_len: Hardware header length
1310 *
1311 * @needed_headroom: Extra headroom the hardware may need, but not in all
1312 * cases can this be guaranteed
1313 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1314 * cases can this be guaranteed. Some cases also use
1315 * LL_MAX_HEADER instead to allocate the skb
1316 *
1317 * interface address info:
1318 *
1319 * @perm_addr: Permanent hw address
1320 * @addr_assign_type: Hw address assignment type
1321 * @addr_len: Hardware address length
1322 * @neigh_priv_len; Used in neigh_alloc(),
1323 * initialized only in atm/clip.c
1324 * @dev_id: Used to differentiate devices that share
1325 * the same link layer address
1326 * @dev_port: Used to differentiate devices that share
1327 * the same function
1328 * @addr_list_lock: XXX: need comments on this one
1329 * @uc: unicast mac addresses
1330 * @mc: multicast mac addresses
1331 * @dev_addrs: list of device hw addresses
1332 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1333 * @uc_promisc: Counter, that indicates, that promiscuous mode
1334 * has been enabled due to the need to listen to
1335 * additional unicast addresses in a device that
1336 * does not implement ndo_set_rx_mode()
1337 * @promiscuity: Number of times, the NIC is told to work in
1338 * Promiscuous mode, if it becomes 0 the NIC will
1339 * exit from working in Promiscuous mode
1340 * @allmulti: Counter, enables or disables allmulticast mode
1341 *
1342 * @vlan_info: VLAN info
1343 * @dsa_ptr: dsa specific data
1344 * @tipc_ptr: TIPC specific data
1345 * @atalk_ptr: AppleTalk link
1346 * @ip_ptr: IPv4 specific data
1347 * @dn_ptr: DECnet specific data
1348 * @ip6_ptr: IPv6 specific data
1349 * @ax25_ptr: AX.25 specific data
1350 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1351 *
1352 * @last_rx: Time of last Rx
1353 * @dev_addr: Hw address (before bcast,
1354 * because most packets are unicast)
1355 *
1356 * @_rx: Array of RX queues
1357 * @num_rx_queues: Number of RX queues
1358 * allocated at register_netdev() time
1359 * @real_num_rx_queues: Number of RX queues currently active in device
1360 *
1361 * @rx_handler: handler for received packets
1362 * @rx_handler_data: XXX: need comments on this one
1363 * @ingress_queue: XXX: need comments on this one
1364 * @broadcast: hw bcast address
1365 *
1366 * @_tx: Array of TX queues
1367 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1368 * @real_num_tx_queues: Number of TX queues currently active in device
1369 * @qdisc: Root qdisc from userspace point of view
1370 * @tx_queue_len: Max frames per queue allowed
1371 * @tx_global_lock: XXX: need comments on this one
1372 *
1373 * @xps_maps: XXX: need comments on this one
1374 *
1375 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1376 * indexed by RX queue number. Assigned by driver.
1377 * This must only be set if the ndo_rx_flow_steer
1378 * operation is defined
1379 *
1380 * @trans_start: Time (in jiffies) of last Tx
1381 * @watchdog_timeo: Represents the timeout that is used by
1382 * the watchdog ( see dev_watchdog() )
1383 * @watchdog_timer: List of timers
1384 *
1385 * @pcpu_refcnt: Number of references to this device
1386 * @todo_list: Delayed register/unregister
1387 * @index_hlist: Device index hash chain
1388 * @link_watch_list: XXX: need comments on this one
1389 *
1390 * @reg_state: Register/unregister state machine
1391 * @dismantle: Device is going to be freed
1392 * @rtnl_link_state: This enum represents the phases of creating
1393 * a new link
1394 *
1395 * @destructor: Called from unregister,
1396 * can be used to call free_netdev
1397 * @npinfo: XXX: need comments on this one
1398 * @nd_net: Network namespace this network device is inside
1399 *
1400 * @ml_priv: Mid-layer private
1401 * @lstats: Loopback statistics
1402 * @tstats: Tunnel statistics
1403 * @dstats: Dummy statistics
1404 * @vstats: Virtual ethernet statistics
1405 *
1406 * @garp_port: GARP
1407 * @mrp_port: MRP
1408 *
1409 * @dev: Class/net/name entry
1410 * @sysfs_groups: Space for optional device, statistics and wireless
1411 * sysfs groups
1412 *
1413 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1414 * @rtnl_link_ops: Rtnl_link_ops
1415 *
1416 * @gso_max_size: Maximum size of generic segmentation offload
1417 * @gso_max_segs: Maximum number of segments that can be passed to the
1418 * NIC for GSO
1419 *
1420 * @dcbnl_ops: Data Center Bridging netlink ops
1421 * @num_tc: Number of traffic classes in the net device
1422 * @tc_to_txq: XXX: need comments on this one
1423 * @prio_tc_map XXX: need comments on this one
1424 *
1425 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1426 *
1427 * @priomap: XXX: need comments on this one
1428 * @phydev: Physical device may attach itself
1429 * for hardware timestamping
1430 *
1431 * @qdisc_tx_busylock: XXX: need comments on this one
1432 *
1433 * @group: The group, that the device belongs to
1434 * @pm_qos_req: Power Management QoS object
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 *
1436 * FIXME: cleanup struct net_device such that network protocol info
1437 * moves out.
1438 */
1439
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001440struct net_device {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 char name[IFNAMSIZ];
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001442 struct hlist_node name_hlist;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001443 char *ifalias;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 /*
1445 * I/O specific fields
1446 * FIXME: Merge these and struct ifmap into one
1447 */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001448 unsigned long mem_end;
1449 unsigned long mem_start;
1450 unsigned long base_addr;
1451 int irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453 /*
Karoly Kemeny536721b2014-07-30 20:27:36 +02001454 * Some hardware also needs these fields (state,dev_list,
1455 * napi_list,unreg_list,close_list) but they are not
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 * part of the usual set specified in Space.c.
1457 */
1458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 unsigned long state;
1460
Pavel Emelianov7562f872007-05-03 15:13:45 -07001461 struct list_head dev_list;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001462 struct list_head napi_list;
Eric Dumazet44a08732009-10-27 07:03:04 +00001463 struct list_head unreg_list;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001464 struct list_head close_list;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001465
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001466 struct {
1467 struct list_head upper;
1468 struct list_head lower;
1469 } adj_list;
1470
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001471 struct {
1472 struct list_head upper;
1473 struct list_head lower;
1474 } all_adj_list;
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001475
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001476 netdev_features_t features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001477 netdev_features_t hw_features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001478 netdev_features_t wanted_features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001479 netdev_features_t vlan_features;
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00001480 netdev_features_t hw_enc_features;
Simon Horman0d89d202013-05-23 21:02:52 +00001481 netdev_features_t mpls_features;
Michał Mirosław04ed3e72011-01-24 15:32:47 -08001482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 int ifindex;
1484 int iflink;
1485
Rusty Russellc45d2862007-03-28 14:29:08 -07001486 struct net_device_stats stats;
Eric Dumazet015f0682014-03-27 08:45:56 -07001487
Eric Dumazet015f0682014-03-27 08:45:56 -07001488 atomic_long_t rx_dropped;
1489 atomic_long_t tx_dropped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
david decotigny2d3b4792014-03-29 09:48:35 -07001491 atomic_t carrier_changes;
1492
Johannes Bergb86e0282007-04-26 20:48:23 -07001493#ifdef CONFIG_WIRELESS_EXT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 const struct iw_handler_def * wireless_handlers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 struct iw_public_data * wireless_data;
Johannes Bergb86e0282007-04-26 20:48:23 -07001496#endif
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001497 const struct net_device_ops *netdev_ops;
Stephen Hemminger76fd8592006-09-08 11:16:13 -07001498 const struct ethtool_ops *ethtool_ops;
John Fastabenda6cc0cf2013-11-06 09:54:46 -08001499 const struct forwarding_accel_ops *fwd_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001501 const struct header_ops *header_ops;
1502
Karoly Kemeny536721b2014-07-30 20:27:36 +02001503 unsigned int flags;
1504 unsigned int priv_flags;
1505
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 unsigned short gflags;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001507 unsigned short padded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
Karoly Kemeny536721b2014-07-30 20:27:36 +02001509 unsigned char operstate;
1510 unsigned char link_mode;
Stefan Rompfb00055a2006-03-20 17:09:11 -08001511
Karoly Kemeny536721b2014-07-30 20:27:36 +02001512 unsigned char if_port;
1513 unsigned char dma;
Joe Perchesbdc220d2011-05-09 17:42:46 +00001514
Karoly Kemeny536721b2014-07-30 20:27:36 +02001515 unsigned int mtu;
1516 unsigned short type;
1517 unsigned short hard_header_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Johannes Bergf5184d22008-05-12 20:48:31 -07001519 unsigned short needed_headroom;
1520 unsigned short needed_tailroom;
1521
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 /* Interface address info. */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001523 unsigned char perm_addr[MAX_ADDR_LEN];
1524 unsigned char addr_assign_type;
1525 unsigned char addr_len;
Sebastian Siewiora0a96632013-12-12 10:15:59 +01001526 unsigned short neigh_priv_len;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001527 unsigned short dev_id;
1528 unsigned short dev_port;
Jiri Pirkoccffad252009-05-22 23:22:17 +00001529 spinlock_t addr_list_lock;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001530 struct netdev_hw_addr_list uc;
1531 struct netdev_hw_addr_list mc;
1532 struct netdev_hw_addr_list dev_addrs;
1533
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001534#ifdef CONFIG_SYSFS
1535 struct kset *queues_kset;
1536#endif
1537
Tom Gundersen685343f2014-07-14 16:37:22 +02001538 unsigned char name_assign_type;
1539
Joe Perches2d348d12011-07-25 16:17:35 -07001540 bool uc_promisc;
Wang Chen9d45abe2008-06-17 21:12:48 -07001541 unsigned int promiscuity;
1542 unsigned int allmulti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544
1545 /* Protocol specific pointers */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001546
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001547#if IS_ENABLED(CONFIG_VLAN_8021Q)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001548 struct vlan_info __rcu *vlan_info;
Jesse Gross65ac6a52010-10-20 13:56:05 +00001549#endif
Ben Hutchings34a430d2011-11-25 14:38:38 +00001550#if IS_ENABLED(CONFIG_NET_DSA)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001551 struct dsa_switch_tree *dsa_ptr;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001552#endif
Ying Xue37cb0622013-12-10 20:45:41 -08001553#if IS_ENABLED(CONFIG_TIPC)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001554 struct tipc_bearer __rcu *tipc_ptr;
Ying Xue37cb0622013-12-10 20:45:41 -08001555#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001556 void *atalk_ptr;
1557 struct in_device __rcu *ip_ptr;
1558 struct dn_dev __rcu *dn_ptr;
1559 struct inet6_dev __rcu *ip6_ptr;
1560 void *ax25_ptr;
1561 struct wireless_dev *ieee80211_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001563/*
Eric Dumazetcd135392010-09-16 02:58:13 +00001564 * Cache lines mostly used on receive path (including eth_type_trans())
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001565 */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001566 unsigned long last_rx;
Eric Dumazet4dc89132010-08-31 07:40:16 +00001567
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001568 /* Interface address info used in eth_type_trans() */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001569 unsigned char *dev_addr;
Jiri Pirkof001fde2009-05-05 02:48:28 +00001570
Tom Herbert0a9627f2010-03-16 08:03:29 +00001571
Michael Daltona953be52014-01-16 22:23:28 -08001572#ifdef CONFIG_SYSFS
Tom Herbert0a9627f2010-03-16 08:03:29 +00001573 struct netdev_rx_queue *_rx;
1574
Tom Herbert0a9627f2010-03-16 08:03:29 +00001575 unsigned int num_rx_queues;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001576 unsigned int real_num_rx_queues;
Ben Hutchingsc4454772011-01-19 11:03:53 +00001577
Eric Dumazetdf334542010-03-24 19:13:54 +00001578#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001579
stephen hemminger61391cd2010-11-15 06:38:12 +00001580 rx_handler_func_t __rcu *rx_handler;
1581 void __rcu *rx_handler_data;
David S. Millere8a04642008-07-17 00:34:19 -07001582
Eric Dumazet24824a02010-10-02 06:11:55 +00001583 struct netdev_queue __rcu *ingress_queue;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001584 unsigned char broadcast[MAX_ADDR_LEN];
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001585
Eric Dumazetcd135392010-09-16 02:58:13 +00001586
1587/*
1588 * Cache lines mostly used on transmit path
1589 */
David S. Millere8a04642008-07-17 00:34:19 -07001590 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1591 unsigned int num_tx_queues;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001592 unsigned int real_num_tx_queues;
Patrick McHardyaf356af2009-09-04 06:41:18 +00001593 struct Qdisc *qdisc;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001594 unsigned long tx_queue_len;
David S. Millerc3f26a22008-07-31 16:58:50 -07001595 spinlock_t tx_global_lock;
Eric Dumazetcd135392010-09-16 02:58:13 +00001596
Tom Herbertbf264142010-11-26 08:36:09 +00001597#ifdef CONFIG_XPS
Eric Dumazeta4177862010-11-28 21:43:02 +00001598 struct xps_dev_maps __rcu *xps_maps;
Tom Herbertbf264142010-11-26 08:36:09 +00001599#endif
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001600#ifdef CONFIG_RFS_ACCEL
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001601 struct cpu_rmap *rx_cpu_rmap;
1602#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001603
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001604 /* These may be needed for future network-power-down code. */
Eric Dumazet9d214932009-05-17 20:55:16 -07001605
1606 /*
1607 * trans_start here is expensive for high speed devices on SMP,
1608 * please use netdev_queue->trans_start instead.
1609 */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001610 unsigned long trans_start;
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001611
Karoly Kemeny536721b2014-07-30 20:27:36 +02001612 int watchdog_timeo;
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001613 struct timer_list watchdog_timer;
1614
Eric Dumazet29b44332010-10-11 10:22:12 +00001615 int __percpu *pcpu_refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 struct list_head todo_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617
Karoly Kemeny536721b2014-07-30 20:27:36 +02001618 struct hlist_node index_hlist;
Eric Dumazete014deb2009-11-17 05:59:21 +00001619 struct list_head link_watch_list;
Herbert Xu572a1032007-05-08 18:34:17 -07001620
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 enum { NETREG_UNINITIALIZED=0,
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07001622 NETREG_REGISTERED, /* completed register_netdevice */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 NETREG_UNREGISTERING, /* called unregister_netdevice */
1624 NETREG_UNREGISTERED, /* completed unregister todo */
1625 NETREG_RELEASED, /* called free_netdev */
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001626 NETREG_DUMMY, /* dummy device for NAPI poll */
Eric Dumazet449f4542011-05-19 12:24:16 +00001627 } reg_state:8;
1628
Karoly Kemeny536721b2014-07-30 20:27:36 +02001629 bool dismantle;
Patrick McHardya2835762010-02-26 06:34:51 +00001630
1631 enum {
1632 RTNL_LINK_INITIALIZED,
1633 RTNL_LINK_INITIALIZING,
1634 } rtnl_link_state:16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001636 void (*destructor)(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638#ifdef CONFIG_NETPOLL
Cong Wang5fbee842013-01-22 21:29:39 +00001639 struct netpoll_info __rcu *npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640#endif
David S. Millereae792b2008-07-15 03:03:33 -07001641
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001642#ifdef CONFIG_NET_NS
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001643 struct net *nd_net;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001644#endif
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001645
David S. Miller49517042008-05-12 03:29:11 -07001646 /* mid-layer private */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001647 union {
Karoly Kemeny536721b2014-07-30 20:27:36 +02001648 void *ml_priv;
1649 struct pcpu_lstats __percpu *lstats;
Li RongQing8f849852014-01-04 13:57:59 +08001650 struct pcpu_sw_netstats __percpu *tstats;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001651 struct pcpu_dstats __percpu *dstats;
1652 struct pcpu_vstats __percpu *vstats;
Eric Dumazeta7855c72010-09-23 23:51:51 +00001653 };
Karoly Kemeny536721b2014-07-30 20:27:36 +02001654
Eric Dumazet3cc77ec2010-10-24 21:32:36 +00001655 struct garp_port __rcu *garp_port;
David Wardfebf0182013-02-08 17:17:06 +00001656 struct mrp_port __rcu *mrp_port;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657
Karoly Kemeny536721b2014-07-30 20:27:36 +02001658 struct device dev;
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001659 const struct attribute_group *sysfs_groups[4];
Michael Daltona953be52014-01-16 22:23:28 -08001660 const struct attribute_group *sysfs_rx_queue_group;
Patrick McHardy38f7b872007-06-13 12:03:51 -07001661
Patrick McHardy38f7b872007-06-13 12:03:51 -07001662 const struct rtnl_link_ops *rtnl_link_ops;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001663
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001664 /* for setting kernel sock attribute on TCP connection setup */
1665#define GSO_MAX_SIZE 65536
1666 unsigned int gso_max_size;
Ben Hutchings30b678d2012-07-30 15:57:00 +00001667#define GSO_MAX_SEGS 65535
1668 u16 gso_max_segs;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001669
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08001670#ifdef CONFIG_DCB
Stephen Hemminger32953542009-10-05 06:01:03 +00001671 const struct dcbnl_rtnl_ops *dcbnl_ops;
Alexander Duyck2f90b862008-11-20 20:52:10 -08001672#endif
John Fastabend4f57c082011-01-17 08:06:04 +00001673 u8 num_tc;
1674 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1675 u8 prio_tc_map[TC_BITMASK + 1];
Alexander Duyck2f90b862008-11-20 20:52:10 -08001676
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001677#if IS_ENABLED(CONFIG_FCOE)
Yi Zou4d288d52009-02-27 14:06:59 -08001678 unsigned int fcoe_ddp_xid;
1679#endif
Daniel Borkmann86f85152013-12-29 17:27:11 +01001680#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00001681 struct netprio_map __rcu *priomap;
1682#endif
Richard Cochranc1f19b52010-07-17 08:49:36 +00001683 struct phy_device *phydev;
Eric Dumazet23d3b8b2012-09-05 01:02:56 +00001684 struct lock_class_key *qdisc_tx_busylock;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001685 int group;
Eric Dumazet91364612012-06-11 06:36:13 +00001686 struct pm_qos_request pm_qos_req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687};
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001688#define to_net_dev(d) container_of(d, struct net_device, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
1690#define NETDEV_ALIGN 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
David S. Millere8a04642008-07-17 00:34:19 -07001692static inline
John Fastabend4f57c082011-01-17 08:06:04 +00001693int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1694{
1695 return dev->prio_tc_map[prio & TC_BITMASK];
1696}
1697
1698static inline
1699int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1700{
1701 if (tc >= dev->num_tc)
1702 return -EINVAL;
1703
1704 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1705 return 0;
1706}
1707
1708static inline
1709void netdev_reset_tc(struct net_device *dev)
1710{
1711 dev->num_tc = 0;
1712 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1713 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1714}
1715
1716static inline
1717int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1718{
1719 if (tc >= dev->num_tc)
1720 return -EINVAL;
1721
1722 dev->tc_to_txq[tc].count = count;
1723 dev->tc_to_txq[tc].offset = offset;
1724 return 0;
1725}
1726
1727static inline
1728int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1729{
1730 if (num_tc > TC_MAX_QUEUE)
1731 return -EINVAL;
1732
1733 dev->num_tc = num_tc;
1734 return 0;
1735}
1736
1737static inline
1738int netdev_get_num_tc(struct net_device *dev)
1739{
1740 return dev->num_tc;
1741}
1742
1743static inline
David S. Millere8a04642008-07-17 00:34:19 -07001744struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1745 unsigned int index)
1746{
1747 return &dev->_tx[index];
1748}
1749
1750static inline void netdev_for_each_tx_queue(struct net_device *dev,
1751 void (*f)(struct net_device *,
1752 struct netdev_queue *,
1753 void *),
1754 void *arg)
1755{
1756 unsigned int i;
1757
1758 for (i = 0; i < dev->num_tx_queues; i++)
1759 f(dev, &dev->_tx[i], arg);
1760}
1761
Joe Perchesf629d202013-09-26 14:48:15 -07001762struct netdev_queue *netdev_pick_tx(struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08001763 struct sk_buff *skb,
1764 void *accel_priv);
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001765
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001766/*
1767 * Net namespace inlines
1768 */
1769static inline
1770struct net *dev_net(const struct net_device *dev)
1771{
Eric Dumazetc2d9ba92010-06-01 06:51:19 +00001772 return read_pnet(&dev->nd_net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001773}
1774
1775static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -07001776void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001777{
1778#ifdef CONFIG_NET_NS
Denis V. Lunevf3005d72008-04-16 02:02:18 -07001779 release_net(dev->nd_net);
1780 dev->nd_net = hold_net(net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001781#endif
1782}
1783
Lennert Buytenhekcf85d082008-10-07 13:45:02 +00001784static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1785{
1786#ifdef CONFIG_NET_DSA_TAG_DSA
1787 if (dev->dsa_ptr != NULL)
1788 return dsa_uses_dsa_tags(dev->dsa_ptr);
1789#endif
1790
1791 return 0;
1792}
1793
Lennert Buytenhek396138f02008-10-07 13:46:07 +00001794static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1795{
1796#ifdef CONFIG_NET_DSA_TAG_TRAILER
1797 if (dev->dsa_ptr != NULL)
1798 return dsa_uses_trailer_tags(dev->dsa_ptr);
1799#endif
1800
1801 return 0;
1802}
1803
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001804/**
1805 * netdev_priv - access network device private data
1806 * @dev: network device
1807 *
1808 * Get network device private data
1809 */
Patrick McHardy6472ce62007-06-13 12:03:21 -07001810static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00001812 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813}
1814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815/* Set the sysfs physical device reference for the network logical device
1816 * if set prior to registration will cause a symlink during initialization.
1817 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001818#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
Marcel Holtmann384912e2009-08-31 21:08:19 +00001820/* Set the sysfs device type for the network logical device to allow
Maxime Jayat3f794102013-10-12 01:29:46 +02001821 * fine-grained identification of different network device types. For
Marcel Holtmann384912e2009-08-31 21:08:19 +00001822 * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
1823 */
1824#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1825
Eric Dumazet82dc3c62013-03-05 15:57:22 +00001826/* Default NAPI poll() weight
1827 * Device drivers are strongly advised to not use bigger value
1828 */
1829#define NAPI_POLL_WEIGHT 64
1830
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07001831/**
1832 * netif_napi_add - initialize a napi context
1833 * @dev: network device
1834 * @napi: napi context
1835 * @poll: polling function
1836 * @weight: default weight
1837 *
1838 * netif_napi_add() must be used to initialize a napi context prior to calling
1839 * *any* of the other napi related functions.
1840 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001841void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1842 int (*poll)(struct napi_struct *, int), int weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001843
Alexander Duyckd8156532008-07-08 15:13:05 -07001844/**
1845 * netif_napi_del - remove a napi context
1846 * @napi: napi context
1847 *
1848 * netif_napi_del() removes a napi context from the network device napi list
1849 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001850void netif_napi_del(struct napi_struct *napi);
1851
1852struct napi_gro_cb {
Herbert Xu78a478d2009-05-26 18:50:21 +00001853 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1854 void *frag0;
1855
Herbert Xu74895942009-05-26 18:50:27 +00001856 /* Length of frag0. */
1857 unsigned int frag0_len;
1858
Herbert Xu86911732009-01-29 14:19:50 +00001859 /* This indicates where we are processing relative to skb->data. */
1860 int data_offset;
1861
Herbert Xud565b0a2008-12-15 23:38:52 -08001862 /* This is non-zero if the packet cannot be merged with the new skb. */
Jerry Chubf5a7552014-01-07 10:23:19 -08001863 u16 flush;
1864
1865 /* Save the IP ID here and check when we get to the transport layer */
1866 u16 flush_id;
Herbert Xud565b0a2008-12-15 23:38:52 -08001867
1868 /* Number of segments aggregated. */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001869 u16 count;
1870
1871 /* This is non-zero if the packet may be of the same flow. */
1872 u8 same_flow;
Herbert Xu5d38a072009-01-04 16:13:40 -08001873
1874 /* Free the skb? */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001875 u8 free;
Eric Dumazetd7e88832012-04-30 08:10:34 +00001876#define NAPI_GRO_FREE 1
1877#define NAPI_GRO_FREE_STOLEN_HEAD 2
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001878
1879 /* jiffies when first packet was created/queued */
1880 unsigned long age;
Eric Dumazet86347242012-10-08 21:38:50 +02001881
1882 /* Used in ipv6_gro_receive() */
Or Gerlitzb582ef02014-01-20 13:59:19 +02001883 u16 proto;
1884
1885 /* Used in udp_gro_receive */
1886 u16 udp_mark;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00001887
Jerry Chubf5a7552014-01-07 10:23:19 -08001888 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
1889 __wsum csum;
1890
Eric Dumazetc3c7c252012-12-06 13:54:59 +00001891 /* used in skb_gro_receive() slow path */
1892 struct sk_buff *last;
Herbert Xud565b0a2008-12-15 23:38:52 -08001893};
1894
1895#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
Alexander Duyckd8156532008-07-08 15:13:05 -07001896
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897struct packet_type {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001898 __be16 type; /* This is really htons(ether_type). */
1899 struct net_device *dev; /* NULL is wildcarded here */
1900 int (*func) (struct sk_buff *,
1901 struct net_device *,
1902 struct packet_type *,
1903 struct net_device *);
Eric Leblondc0de08d2012-08-16 22:02:58 +00001904 bool (*id_match)(struct packet_type *ptype,
1905 struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 void *af_packet_priv;
1907 struct list_head list;
1908};
1909
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001910struct offload_callbacks {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1912 netdev_features_t features);
1913 int (*gso_send_check)(struct sk_buff *skb);
1914 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1915 struct sk_buff *skb);
Jerry Chu299603e82013-12-11 20:53:45 -08001916 int (*gro_complete)(struct sk_buff *skb, int nhoff);
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001917};
1918
1919struct packet_offload {
1920 __be16 type; /* This is really htons(ether_type). */
1921 struct offload_callbacks callbacks;
1922 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923};
1924
Or Gerlitzb582ef02014-01-20 13:59:19 +02001925struct udp_offload {
1926 __be16 port;
1927 struct offload_callbacks callbacks;
1928};
1929
Li RongQing8f849852014-01-04 13:57:59 +08001930/* often modified stats are per cpu, other are shared (netdev->stats) */
1931struct pcpu_sw_netstats {
1932 u64 rx_packets;
1933 u64 rx_bytes;
1934 u64 tx_packets;
1935 u64 tx_bytes;
1936 struct u64_stats_sync syncp;
1937};
1938
WANG Cong1c213bd2014-02-13 11:46:28 -08001939#define netdev_alloc_pcpu_stats(type) \
1940({ \
stephen hemminger693350c2014-03-10 09:41:46 -07001941 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
WANG Cong1c213bd2014-02-13 11:46:28 -08001942 if (pcpu_stats) { \
1943 int i; \
1944 for_each_possible_cpu(i) { \
1945 typeof(type) *stat; \
1946 stat = per_cpu_ptr(pcpu_stats, i); \
1947 u64_stats_init(&stat->syncp); \
1948 } \
1949 } \
1950 pcpu_stats; \
1951})
1952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953#include <linux/notifier.h>
1954
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001955/* netdevice notifier chain. Please remember to update the rtnetlink
1956 * notification exclusion list in rtnetlink_event() when adding new
1957 * types.
1958 */
1959#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1960#define NETDEV_DOWN 0x0002
1961#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1962 detected a hardware crash and restarted
1963 - we can use this eg to kick tcp sessions
1964 once done */
1965#define NETDEV_CHANGE 0x0004 /* Notify device state change */
1966#define NETDEV_REGISTER 0x0005
1967#define NETDEV_UNREGISTER 0x0006
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01001968#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001969#define NETDEV_CHANGEADDR 0x0008
1970#define NETDEV_GOING_DOWN 0x0009
1971#define NETDEV_CHANGENAME 0x000A
1972#define NETDEV_FEAT_CHANGE 0x000B
1973#define NETDEV_BONDING_FAILOVER 0x000C
1974#define NETDEV_PRE_UP 0x000D
1975#define NETDEV_PRE_TYPE_CHANGE 0x000E
1976#define NETDEV_POST_TYPE_CHANGE 0x000F
1977#define NETDEV_POST_INIT 0x0010
Eric Dumazet0115e8e2012-08-22 17:19:46 +00001978#define NETDEV_UNREGISTER_FINAL 0x0011
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001979#define NETDEV_RELEASE 0x0012
1980#define NETDEV_NOTIFY_PEERS 0x0013
1981#define NETDEV_JOIN 0x0014
Jiri Pirko42e52bf2013-05-25 04:12:10 +00001982#define NETDEV_CHANGEUPPER 0x0015
Jiri Pirko4aa5dee2013-07-20 12:13:53 +02001983#define NETDEV_RESEND_IGMP 0x0016
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01001984#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001985
Joe Perchesf629d202013-09-26 14:48:15 -07001986int register_netdevice_notifier(struct notifier_block *nb);
1987int unregister_netdevice_notifier(struct notifier_block *nb);
Jiri Pirko351638e2013-05-28 01:30:21 +00001988
1989struct netdev_notifier_info {
1990 struct net_device *dev;
1991};
1992
Jiri Pirkobe9efd32013-05-28 01:30:22 +00001993struct netdev_notifier_change_info {
1994 struct netdev_notifier_info info; /* must be first */
1995 unsigned int flags_changed;
1996};
1997
Cong Wang75538c22013-05-29 11:30:50 +08001998static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1999 struct net_device *dev)
2000{
2001 info->dev = dev;
2002}
2003
Jiri Pirko351638e2013-05-28 01:30:21 +00002004static inline struct net_device *
2005netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2006{
2007 return info->dev;
2008}
2009
Joe Perchesf629d202013-09-26 14:48:15 -07002010int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002011
2012
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013extern rwlock_t dev_base_lock; /* Device list lock */
2014
Eric W. Biederman881d9662007-09-17 11:56:21 -07002015#define for_each_netdev(net, d) \
2016 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
Eric W. Biedermandcbccbd42009-11-29 22:25:26 +00002017#define for_each_netdev_reverse(net, d) \
2018 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08002019#define for_each_netdev_rcu(net, d) \
2020 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002021#define for_each_netdev_safe(net, d, n) \
2022 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2023#define for_each_netdev_continue(net, d) \
2024 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
stephen hemminger254245d2009-11-10 07:54:47 +00002025#define for_each_netdev_continue_rcu(net, d) \
2026 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00002027#define for_each_netdev_in_bond_rcu(bond, slave) \
2028 for_each_netdev_rcu(&init_net, slave) \
2029 if (netdev_master_upper_dev_get_rcu(slave) == bond)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002030#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2031
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002032static inline struct net_device *next_net_device(struct net_device *dev)
2033{
2034 struct list_head *lh;
2035 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002036
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002037 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002038 lh = dev->dev_list.next;
2039 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2040}
2041
Eric Dumazetce81b762009-11-11 17:34:30 +00002042static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2043{
2044 struct list_head *lh;
2045 struct net *net;
2046
2047 net = dev_net(dev);
Eric Dumazetccf43432011-01-26 18:08:02 +00002048 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
Eric Dumazetce81b762009-11-11 17:34:30 +00002049 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2050}
2051
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002052static inline struct net_device *first_net_device(struct net *net)
2053{
2054 return list_empty(&net->dev_base_head) ? NULL :
2055 net_device_entry(net->dev_base_head.next);
2056}
Pavel Emelianov7562f872007-05-03 15:13:45 -07002057
Eric Dumazetccf43432011-01-26 18:08:02 +00002058static inline struct net_device *first_net_device_rcu(struct net *net)
2059{
2060 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2061
2062 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2063}
2064
Joe Perchesf629d202013-09-26 14:48:15 -07002065int netdev_boot_setup_check(struct net_device *dev);
2066unsigned long netdev_boot_base(const char *prefix, int unit);
2067struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2068 const char *hwaddr);
2069struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2070struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2071void dev_add_pack(struct packet_type *pt);
2072void dev_remove_pack(struct packet_type *pt);
2073void __dev_remove_pack(struct packet_type *pt);
2074void dev_add_offload(struct packet_offload *po);
2075void dev_remove_offload(struct packet_offload *po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Joe Perchesf629d202013-09-26 14:48:15 -07002077struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
2078 unsigned short mask);
2079struct net_device *dev_get_by_name(struct net *net, const char *name);
2080struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2081struct net_device *__dev_get_by_name(struct net *net, const char *name);
2082int dev_alloc_name(struct net_device *dev, const char *name);
2083int dev_open(struct net_device *dev);
2084int dev_close(struct net_device *dev);
2085void dev_disable_lro(struct net_device *dev);
2086int dev_loopback_xmit(struct sk_buff *newskb);
2087int dev_queue_xmit(struct sk_buff *skb);
Jason Wangf663dd92014-01-10 16:18:26 +08002088int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
Joe Perchesf629d202013-09-26 14:48:15 -07002089int register_netdevice(struct net_device *dev);
2090void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2091void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00002092static inline void unregister_netdevice(struct net_device *dev)
2093{
2094 unregister_netdevice_queue(dev, NULL);
2095}
2096
Joe Perchesf629d202013-09-26 14:48:15 -07002097int netdev_refcnt_read(const struct net_device *dev);
2098void free_netdev(struct net_device *dev);
Eric Dumazet74d332c2013-10-30 13:10:44 -07002099void netdev_freemem(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07002100void synchronize_net(void);
2101int init_dummy_netdev(struct net_device *dev);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08002102
Joe Perchesf629d202013-09-26 14:48:15 -07002103struct net_device *dev_get_by_index(struct net *net, int ifindex);
2104struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2105struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2106int netdev_get_name(struct net *net, char *name, int ifindex);
2107int dev_restart(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07002108int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
Herbert Xu86911732009-01-29 14:19:50 +00002109
2110static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2111{
2112 return NAPI_GRO_CB(skb)->data_offset;
2113}
2114
2115static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2116{
2117 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2118}
2119
2120static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2121{
2122 NAPI_GRO_CB(skb)->data_offset += len;
2123}
2124
Herbert Xua5b1cf22009-05-26 18:50:28 +00002125static inline void *skb_gro_header_fast(struct sk_buff *skb,
2126 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00002127{
Herbert Xu78a478d2009-05-26 18:50:21 +00002128 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00002129}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Herbert Xua5b1cf22009-05-26 18:50:28 +00002131static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2132{
2133 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2134}
2135
2136static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2137 unsigned int offset)
2138{
Herbert Xu17dd7592011-07-27 06:16:28 -07002139 if (!pskb_may_pull(skb, hlen))
2140 return NULL;
2141
Herbert Xua5b1cf22009-05-26 18:50:28 +00002142 NAPI_GRO_CB(skb)->frag0 = NULL;
2143 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu17dd7592011-07-27 06:16:28 -07002144 return skb->data + offset;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002145}
2146
Herbert Xu36e7b1b2009-04-27 05:44:45 -07002147static inline void *skb_gro_network_header(struct sk_buff *skb)
2148{
Herbert Xu78d3fd02009-05-26 18:50:23 +00002149 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2150 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07002151}
2152
Jerry Chubf5a7552014-01-07 10:23:19 -08002153static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2154 const void *start, unsigned int len)
2155{
2156 if (skb->ip_summed == CHECKSUM_COMPLETE)
2157 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2158 csum_partial(start, len, 0));
2159}
2160
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002161static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2162 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002163 const void *daddr, const void *saddr,
Eric Dumazet95c96172012-04-15 05:58:06 +00002164 unsigned int len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002165{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02002166 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002167 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002168
2169 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002170}
2171
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002172static inline int dev_parse_header(const struct sk_buff *skb,
2173 unsigned char *haddr)
2174{
2175 const struct net_device *dev = skb->dev;
2176
Patrick McHardy1b833362007-10-18 05:09:28 -07002177 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002178 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002179 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002180}
2181
David S. Miller22053692013-12-31 16:23:35 -05002182static inline int dev_rebuild_header(struct sk_buff *skb)
2183{
2184 const struct net_device *dev = skb->dev;
2185
2186 if (!dev->header_ops || !dev->header_ops->rebuild)
2187 return 0;
2188 return dev->header_ops->rebuild(skb);
2189}
2190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
Joe Perchesf629d202013-09-26 14:48:15 -07002192int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193static inline int unregister_gifconf(unsigned int family)
2194{
2195 return register_gifconf(family, NULL);
2196}
2197
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002198#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04002199#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002200struct sd_flow_limit {
2201 u64 count;
2202 unsigned int num_buckets;
2203 unsigned int history_head;
2204 u16 history[FLOW_LIMIT_HISTORY];
2205 u8 buckets[];
2206};
2207
2208extern int netdev_flow_limit_table_len;
2209#endif /* CONFIG_NET_FLOW_LIMIT */
2210
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211/*
Eric Dumazet88751272010-04-19 05:07:33 +00002212 * Incoming packets are placed on per-cpu queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08002214struct softnet_data {
David S. Miller37437bb2008-07-16 02:15:04 -07002215 struct Qdisc *output_queue;
Changli Gaoa9cbd582010-04-26 23:06:24 +00002216 struct Qdisc **output_queue_tailp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 struct list_head poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 struct sk_buff *completion_queue;
Changli Gao6e7676c2010-04-27 15:07:33 -07002219 struct sk_buff_head process_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
Changli Gaodee42872010-05-02 05:42:16 +00002221 /* stats */
David S. Millercd7b5392010-05-02 22:27:59 -07002222 unsigned int processed;
2223 unsigned int time_squeeze;
2224 unsigned int cpu_collision;
2225 unsigned int received_rps;
Changli Gaodee42872010-05-02 05:42:16 +00002226
Changli Gaofd793d82010-04-15 00:16:59 -07002227#ifdef CONFIG_RPS
Eric Dumazet88751272010-04-19 05:07:33 +00002228 struct softnet_data *rps_ipi_list;
2229
2230 /* Elements below can be accessed between CPUs for RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00002231 struct call_single_data csd ____cacheline_aligned_in_smp;
Eric Dumazet88751272010-04-19 05:07:33 +00002232 struct softnet_data *rps_ipi_next;
2233 unsigned int cpu;
Tom Herbertfec5e652010-04-16 16:01:27 -07002234 unsigned int input_queue_head;
Tom Herbert76cc8b12010-05-20 18:37:59 +00002235 unsigned int input_queue_tail;
Tom Herbert1e94d722010-03-18 17:45:44 -07002236#endif
Eric Dumazet95c96172012-04-15 05:58:06 +00002237 unsigned int dropped;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002238 struct sk_buff_head input_pkt_queue;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002239 struct napi_struct backlog;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002240
2241#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04002242 struct sd_flow_limit __rcu *flow_limit;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002243#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244};
2245
Tom Herbert76cc8b12010-05-20 18:37:59 +00002246static inline void input_queue_head_incr(struct softnet_data *sd)
Tom Herbertfec5e652010-04-16 16:01:27 -07002247{
2248#ifdef CONFIG_RPS
Tom Herbert76cc8b12010-05-20 18:37:59 +00002249 sd->input_queue_head++;
2250#endif
2251}
2252
2253static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2254 unsigned int *qtail)
2255{
2256#ifdef CONFIG_RPS
2257 *qtail = ++sd->input_queue_tail;
Tom Herbertfec5e652010-04-16 16:01:27 -07002258#endif
2259}
2260
Tom Herbert0a9627f2010-03-16 08:03:29 +00002261DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262
Joe Perchesf629d202013-09-26 14:48:15 -07002263void __netif_schedule(struct Qdisc *q);
David S. Miller86d804e2008-07-08 23:11:25 -07002264
2265static inline void netif_schedule_queue(struct netdev_queue *txq)
2266{
Tom Herbert734664982011-11-28 16:32:44 +00002267 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
David S. Miller37437bb2008-07-16 02:15:04 -07002268 __netif_schedule(txq->qdisc);
David S. Miller86d804e2008-07-08 23:11:25 -07002269}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002271static inline void netif_tx_schedule_all(struct net_device *dev)
2272{
2273 unsigned int i;
2274
2275 for (i = 0; i < dev->num_tx_queues; i++)
2276 netif_schedule_queue(netdev_get_tx_queue(dev, i));
2277}
2278
Dave Jonesd29f7492008-07-22 14:09:06 -07002279static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
2280{
Tom Herbert734664982011-11-28 16:32:44 +00002281 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002282}
2283
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002284/**
2285 * netif_start_queue - allow transmit
2286 * @dev: network device
2287 *
2288 * Allow upper layers to call the device hard_start_xmit routine.
2289 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290static inline void netif_start_queue(struct net_device *dev)
2291{
David S. Millere8a04642008-07-17 00:34:19 -07002292 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293}
2294
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002295static inline void netif_tx_start_all_queues(struct net_device *dev)
2296{
2297 unsigned int i;
2298
2299 for (i = 0; i < dev->num_tx_queues; i++) {
2300 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2301 netif_tx_start_queue(txq);
2302 }
2303}
2304
David S. Miller79d16382008-07-08 23:14:46 -07002305static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306{
Tom Herbert734664982011-11-28 16:32:44 +00002307 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
David S. Miller37437bb2008-07-16 02:15:04 -07002308 __netif_schedule(dev_queue->qdisc);
David S. Miller79d16382008-07-08 23:14:46 -07002309}
2310
Dave Jonesd29f7492008-07-22 14:09:06 -07002311/**
2312 * netif_wake_queue - restart transmit
2313 * @dev: network device
2314 *
2315 * Allow upper layers to call the device hard_start_xmit routine.
2316 * Used for flow control when transmit resources are available.
2317 */
David S. Miller79d16382008-07-08 23:14:46 -07002318static inline void netif_wake_queue(struct net_device *dev)
2319{
David S. Millere8a04642008-07-17 00:34:19 -07002320 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321}
2322
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002323static inline void netif_tx_wake_all_queues(struct net_device *dev)
2324{
2325 unsigned int i;
2326
2327 for (i = 0; i < dev->num_tx_queues; i++) {
2328 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2329 netif_tx_wake_queue(txq);
2330 }
2331}
2332
Dave Jonesd29f7492008-07-22 14:09:06 -07002333static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2334{
Guillaume Chazarain18543a62010-11-06 06:39:32 +00002335 if (WARN_ON(!dev_queue)) {
Joe Perches256ee432011-03-01 07:06:12 +00002336 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
Guillaume Chazarain18543a62010-11-06 06:39:32 +00002337 return;
2338 }
Tom Herbert734664982011-11-28 16:32:44 +00002339 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002340}
2341
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002342/**
2343 * netif_stop_queue - stop transmitted packets
2344 * @dev: network device
2345 *
2346 * Stop upper layers calling the device hard_start_xmit routine.
2347 * Used for flow control when transmit resources are unavailable.
2348 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349static inline void netif_stop_queue(struct net_device *dev)
2350{
David S. Millere8a04642008-07-17 00:34:19 -07002351 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352}
2353
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002354static inline void netif_tx_stop_all_queues(struct net_device *dev)
2355{
2356 unsigned int i;
2357
2358 for (i = 0; i < dev->num_tx_queues; i++) {
2359 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2360 netif_tx_stop_queue(txq);
2361 }
2362}
2363
David S. Miller4d295152012-03-07 21:02:35 -05002364static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07002365{
Tom Herbert734664982011-11-28 16:32:44 +00002366 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002367}
2368
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002369/**
2370 * netif_queue_stopped - test if transmit queue is flowblocked
2371 * @dev: network device
2372 *
2373 * Test if transmit queue on device is currently unable to send.
2374 */
David S. Miller4d295152012-03-07 21:02:35 -05002375static inline bool netif_queue_stopped(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376{
David S. Millere8a04642008-07-17 00:34:19 -07002377 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378}
2379
David S. Miller4d295152012-03-07 21:02:35 -05002380static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
David S. Millerc3f26a22008-07-31 16:58:50 -07002381{
Tom Herbert734664982011-11-28 16:32:44 +00002382 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2383}
2384
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +02002385static inline bool
2386netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
Tom Herbert734664982011-11-28 16:32:44 +00002387{
2388 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2389}
2390
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +02002391static inline bool
2392netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2393{
2394 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2395}
2396
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002397static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2398 unsigned int bytes)
2399{
Tom Herbert114cf582011-11-28 16:33:09 +00002400#ifdef CONFIG_BQL
2401 dql_queued(&dev_queue->dql, bytes);
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002402
2403 if (likely(dql_avail(&dev_queue->dql) >= 0))
2404 return;
2405
2406 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2407
2408 /*
2409 * The XOFF flag must be set before checking the dql_avail below,
2410 * because in netdev_tx_completed_queue we update the dql_completed
2411 * before checking the XOFF flag.
2412 */
2413 smp_mb();
2414
2415 /* check again in case another CPU has just made room avail */
2416 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2417 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002418#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002419}
2420
Florian Fainelli0042d0c2013-09-06 16:58:00 +01002421/**
2422 * netdev_sent_queue - report the number of bytes queued to hardware
2423 * @dev: network device
2424 * @bytes: number of bytes queued to the hardware device queue
2425 *
2426 * Report the number of bytes queued for sending/completion to the network
2427 * device hardware queue. @bytes should be a good approximation and should
2428 * exactly match netdev_completed_queue() @bytes
2429 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002430static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2431{
2432 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2433}
2434
2435static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
Eric Dumazet95c96172012-04-15 05:58:06 +00002436 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002437{
Tom Herbert114cf582011-11-28 16:33:09 +00002438#ifdef CONFIG_BQL
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002439 if (unlikely(!bytes))
2440 return;
2441
2442 dql_completed(&dev_queue->dql, bytes);
2443
2444 /*
2445 * Without the memory barrier there is a small possiblity that
2446 * netdev_tx_sent_queue will miss the update and cause the queue to
2447 * be stopped forever
2448 */
2449 smp_mb();
2450
2451 if (dql_avail(&dev_queue->dql) < 0)
2452 return;
2453
2454 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2455 netif_schedule_queue(dev_queue);
Tom Herbert114cf582011-11-28 16:33:09 +00002456#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002457}
2458
Florian Fainelli0042d0c2013-09-06 16:58:00 +01002459/**
2460 * netdev_completed_queue - report bytes and packets completed by device
2461 * @dev: network device
2462 * @pkts: actual number of packets sent over the medium
2463 * @bytes: actual number of bytes sent over the medium
2464 *
2465 * Report the number of bytes and packets transmitted by the network device
2466 * hardware queue over the physical medium, @bytes must exactly match the
2467 * @bytes amount passed to netdev_sent_queue()
2468 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002469static inline void netdev_completed_queue(struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00002470 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002471{
2472 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2473}
2474
2475static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2476{
Tom Herbert114cf582011-11-28 16:33:09 +00002477#ifdef CONFIG_BQL
Alexander Duyck5c490352012-02-07 02:29:01 +00002478 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002479 dql_reset(&q->dql);
2480#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002481}
2482
Florian Fainelli0042d0c2013-09-06 16:58:00 +01002483/**
2484 * netdev_reset_queue - reset the packets and bytes count of a network device
2485 * @dev_queue: network device
2486 *
2487 * Reset the bytes and packet count of a network device and clear the
2488 * software flow control OFF bit for this network device
2489 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002490static inline void netdev_reset_queue(struct net_device *dev_queue)
2491{
2492 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
David S. Millerc3f26a22008-07-31 16:58:50 -07002493}
2494
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002495/**
Daniel Borkmannb9507bd2014-02-16 15:55:21 +01002496 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
2497 * @dev: network device
2498 * @queue_index: given tx queue index
2499 *
2500 * Returns 0 if given tx queue index >= number of device tx queues,
2501 * otherwise returns the originally passed tx queue index.
2502 */
2503static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
2504{
2505 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2506 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2507 dev->name, queue_index,
2508 dev->real_num_tx_queues);
2509 return 0;
2510 }
2511
2512 return queue_index;
2513}
2514
2515/**
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002516 * netif_running - test if up
2517 * @dev: network device
2518 *
2519 * Test if the device has been brought up.
2520 */
David S. Miller4d295152012-03-07 21:02:35 -05002521static inline bool netif_running(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522{
2523 return test_bit(__LINK_STATE_START, &dev->state);
2524}
2525
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002526/*
2527 * Routines to manage the subqueues on a device. We only need start
2528 * stop, and a check if it's stopped. All other device management is
2529 * done at the overall netdevice level.
2530 * Also test the device if we're multiqueue.
2531 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002532
2533/**
2534 * netif_start_subqueue - allow sending packets on subqueue
2535 * @dev: network device
2536 * @queue_index: sub queue index
2537 *
2538 * Start individual transmit queue of a device with multiple transmit queues.
2539 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002540static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2541{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002542 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002543
2544 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002545}
2546
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002547/**
2548 * netif_stop_subqueue - stop sending packets on subqueue
2549 * @dev: network device
2550 * @queue_index: sub queue index
2551 *
2552 * Stop individual transmit queue of a device with multiple transmit queues.
2553 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002554static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2555{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002556 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002557 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002558}
2559
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002560/**
2561 * netif_subqueue_stopped - test status of subqueue
2562 * @dev: network device
2563 * @queue_index: sub queue index
2564 *
2565 * Check individual transmit queue of a device with multiple transmit queues.
2566 */
David S. Miller4d295152012-03-07 21:02:35 -05002567static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2568 u16 queue_index)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002569{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002570 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002571
2572 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002573}
2574
David S. Miller4d295152012-03-07 21:02:35 -05002575static inline bool netif_subqueue_stopped(const struct net_device *dev,
2576 struct sk_buff *skb)
Pavel Emelyanov668f8952007-10-21 17:01:56 -07002577{
2578 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2579}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002580
2581/**
2582 * netif_wake_subqueue - allow sending packets on subqueue
2583 * @dev: network device
2584 * @queue_index: sub queue index
2585 *
2586 * Resume individual transmit queue of a device with multiple transmit queues.
2587 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002588static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2589{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002590 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Tom Herbert734664982011-11-28 16:32:44 +00002591 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
David S. Miller37437bb2008-07-16 02:15:04 -07002592 __netif_schedule(txq->qdisc);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002593}
2594
Alexander Duyck537c00d2013-01-10 08:57:02 +00002595#ifdef CONFIG_XPS
David S. Miller53af53a2013-10-08 23:07:53 -04002596int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
Joe Perchesf629d202013-09-26 14:48:15 -07002597 u16 index);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002598#else
2599static inline int netif_set_xps_queue(struct net_device *dev,
Michael S. Tsirkin35735402013-10-02 09:14:06 +03002600 const struct cpumask *mask,
Alexander Duyck537c00d2013-01-10 08:57:02 +00002601 u16 index)
2602{
2603 return 0;
2604}
2605#endif
2606
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002607/*
2608 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2609 * as a distribution range limit for the returned value.
2610 */
2611static inline u16 skb_tx_hash(const struct net_device *dev,
Tom Herbert0e001612014-07-01 21:32:27 -07002612 struct sk_buff *skb)
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002613{
2614 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2615}
2616
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002617/**
2618 * netif_is_multiqueue - test if device has multiple transmit queues
2619 * @dev: network device
2620 *
2621 * Check if device has multiple transmit queues
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002622 */
David S. Miller4d295152012-03-07 21:02:35 -05002623static inline bool netif_is_multiqueue(const struct net_device *dev)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002624{
Eric Dumazeta02cec22010-09-22 20:43:57 +00002625 return dev->num_tx_queues > 1;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002626}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627
Joe Perchesf629d202013-09-26 14:48:15 -07002628int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
John Fastabendf0796d52010-07-01 13:21:57 +00002629
Michael Daltona953be52014-01-16 22:23:28 -08002630#ifdef CONFIG_SYSFS
Joe Perchesf629d202013-09-26 14:48:15 -07002631int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002632#else
2633static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2634 unsigned int rxq)
2635{
2636 return 0;
2637}
2638#endif
2639
Ben Hutchings3171d022010-09-27 08:24:49 +00002640static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2641 const struct net_device *from_dev)
2642{
Jiri Pirkoee6ae1a2012-07-20 02:28:46 +00002643 int err;
2644
2645 err = netif_set_real_num_tx_queues(to_dev,
2646 from_dev->real_num_tx_queues);
2647 if (err)
2648 return err;
Michael Daltona953be52014-01-16 22:23:28 -08002649#ifdef CONFIG_SYSFS
Ben Hutchings3171d022010-09-27 08:24:49 +00002650 return netif_set_real_num_rx_queues(to_dev,
2651 from_dev->real_num_rx_queues);
2652#else
2653 return 0;
2654#endif
2655}
2656
Michael Daltona953be52014-01-16 22:23:28 -08002657#ifdef CONFIG_SYSFS
2658static inline unsigned int get_netdev_rx_queue_index(
2659 struct netdev_rx_queue *queue)
2660{
2661 struct net_device *dev = queue->dev;
2662 int index = queue - dev->_rx;
2663
2664 BUG_ON(index >= dev->num_rx_queues);
2665 return index;
2666}
2667#endif
2668
Yuval Mintz16917b82012-07-01 03:18:50 +00002669#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
Joe Perchesf629d202013-09-26 14:48:15 -07002670int netif_get_num_default_rss_queues(void);
Yuval Mintz16917b82012-07-01 03:18:50 +00002671
Eric Dumazete6247022013-12-05 04:45:08 -08002672enum skb_free_reason {
2673 SKB_REASON_CONSUMED,
2674 SKB_REASON_DROPPED,
2675};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
Eric Dumazete6247022013-12-05 04:45:08 -08002677void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
2678void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
2679
2680/*
2681 * It is not allowed to call kfree_skb() or consume_skb() from hardware
2682 * interrupt context or with hardware interrupts being disabled.
2683 * (in_irq() || irqs_disabled())
2684 *
2685 * We provide four helpers that can be used in following contexts :
2686 *
2687 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
2688 * replacing kfree_skb(skb)
2689 *
2690 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
2691 * Typically used in place of consume_skb(skb) in TX completion path
2692 *
2693 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
2694 * replacing kfree_skb(skb)
2695 *
2696 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
2697 * and consumed a packet. Used in place of consume_skb(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 */
Eric Dumazete6247022013-12-05 04:45:08 -08002699static inline void dev_kfree_skb_irq(struct sk_buff *skb)
2700{
2701 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
2702}
2703
2704static inline void dev_consume_skb_irq(struct sk_buff *skb)
2705{
2706 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
2707}
2708
2709static inline void dev_kfree_skb_any(struct sk_buff *skb)
2710{
2711 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
2712}
2713
2714static inline void dev_consume_skb_any(struct sk_buff *skb)
2715{
2716 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
2717}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
Joe Perchesf629d202013-09-26 14:48:15 -07002719int netif_rx(struct sk_buff *skb);
2720int netif_rx_ni(struct sk_buff *skb);
2721int netif_receive_skb(struct sk_buff *skb);
2722gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2723void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2724struct sk_buff *napi_get_frags(struct napi_struct *napi);
2725gro_result_t napi_gro_frags(struct napi_struct *napi);
Jerry Chubf5a7552014-01-07 10:23:19 -08002726struct packet_offload *gro_find_receive_by_type(__be16 type);
2727struct packet_offload *gro_find_complete_by_type(__be16 type);
Herbert Xu76620aa2009-04-16 02:02:07 -07002728
2729static inline void napi_free_frags(struct napi_struct *napi)
2730{
2731 kfree_skb(napi->skb);
2732 napi->skb = NULL;
2733}
2734
Joe Perchesf629d202013-09-26 14:48:15 -07002735int netdev_rx_handler_register(struct net_device *dev,
2736 rx_handler_func_t *rx_handler,
2737 void *rx_handler_data);
2738void netdev_rx_handler_unregister(struct net_device *dev);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00002739
Joe Perchesf629d202013-09-26 14:48:15 -07002740bool dev_valid_name(const char *name);
2741int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2742int dev_ethtool(struct net *net, struct ifreq *);
2743unsigned int dev_get_flags(const struct net_device *);
2744int __dev_change_flags(struct net_device *, unsigned int flags);
2745int dev_change_flags(struct net_device *, unsigned int);
David S. Millercb178192013-09-30 15:36:45 -04002746void __dev_notify_flags(struct net_device *, unsigned int old_flags,
2747 unsigned int gchanges);
Joe Perchesf629d202013-09-26 14:48:15 -07002748int dev_change_name(struct net_device *, const char *);
2749int dev_set_alias(struct net_device *, const char *, size_t);
2750int dev_change_net_namespace(struct net_device *, struct net *, const char *);
2751int dev_set_mtu(struct net_device *, int);
2752void dev_set_group(struct net_device *, int);
2753int dev_set_mac_address(struct net_device *, struct sockaddr *);
2754int dev_change_carrier(struct net_device *, bool new_carrier);
2755int dev_get_phys_port_id(struct net_device *dev,
2756 struct netdev_phys_port_id *ppid);
2757int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08002758 struct netdev_queue *txq);
Herbert Xua0265d22014-04-17 13:45:03 +08002759int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
Joe Perchesf629d202013-09-26 14:48:15 -07002760int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04002761bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03002763extern int netdev_budget;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765/* Called by rtnetlink.c:rtnl_unlock() */
Joe Perchesf629d202013-09-26 14:48:15 -07002766void netdev_run_todo(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002768/**
2769 * dev_put - release reference to device
2770 * @dev: network device
2771 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07002772 * Release reference to device to allow it to be freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002773 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774static inline void dev_put(struct net_device *dev)
2775{
Christoph Lameter933393f2011-12-22 11:58:51 -06002776 this_cpu_dec(*dev->pcpu_refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777}
2778
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002779/**
2780 * dev_hold - get reference to device
2781 * @dev: network device
2782 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07002783 * Hold reference to device to keep it from being freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002784 */
Stephen Hemminger15333062006-03-20 22:32:28 -08002785static inline void dev_hold(struct net_device *dev)
2786{
Christoph Lameter933393f2011-12-22 11:58:51 -06002787 this_cpu_inc(*dev->pcpu_refcnt);
Stephen Hemminger15333062006-03-20 22:32:28 -08002788}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789
2790/* Carrier loss detection, dial on demand. The functions netif_carrier_on
2791 * and _off may be called from IRQ context, but it is caller
2792 * who is responsible for serialization of these calls.
Stefan Rompfb00055a2006-03-20 17:09:11 -08002793 *
2794 * The name carrier is inappropriate, these functions should really be
2795 * called netif_lowerlayer_*() because they represent the state of any
2796 * kind of lower layer not just hardware media.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 */
2798
Joe Perchesf629d202013-09-26 14:48:15 -07002799void linkwatch_init_dev(struct net_device *dev);
2800void linkwatch_fire_event(struct net_device *dev);
2801void linkwatch_forget_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002803/**
2804 * netif_carrier_ok - test if carrier present
2805 * @dev: network device
2806 *
2807 * Check if carrier is present on device
2808 */
David S. Miller4d295152012-03-07 21:02:35 -05002809static inline bool netif_carrier_ok(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810{
2811 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2812}
2813
Joe Perchesf629d202013-09-26 14:48:15 -07002814unsigned long dev_trans_start(struct net_device *dev);
Eric Dumazet9d214932009-05-17 20:55:16 -07002815
Joe Perchesf629d202013-09-26 14:48:15 -07002816void __netdev_watchdog_up(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
Joe Perchesf629d202013-09-26 14:48:15 -07002818void netif_carrier_on(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
Joe Perchesf629d202013-09-26 14:48:15 -07002820void netif_carrier_off(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002822/**
2823 * netif_dormant_on - mark device as dormant.
2824 * @dev: network device
2825 *
2826 * Mark device as dormant (as per RFC2863).
2827 *
2828 * The dormant state indicates that the relevant interface is not
2829 * actually in a condition to pass packets (i.e., it is not 'up') but is
2830 * in a "pending" state, waiting for some external event. For "on-
2831 * demand" interfaces, this new state identifies the situation where the
2832 * interface is waiting for events to place it in the up state.
2833 *
2834 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08002835static inline void netif_dormant_on(struct net_device *dev)
2836{
2837 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2838 linkwatch_fire_event(dev);
2839}
2840
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002841/**
2842 * netif_dormant_off - set device as not dormant.
2843 * @dev: network device
2844 *
2845 * Device is not in dormant state.
2846 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08002847static inline void netif_dormant_off(struct net_device *dev)
2848{
2849 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2850 linkwatch_fire_event(dev);
2851}
2852
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002853/**
2854 * netif_dormant - test if carrier present
2855 * @dev: network device
2856 *
2857 * Check if carrier is present on device
2858 */
David S. Miller4d295152012-03-07 21:02:35 -05002859static inline bool netif_dormant(const struct net_device *dev)
Stefan Rompfb00055a2006-03-20 17:09:11 -08002860{
2861 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2862}
2863
2864
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002865/**
2866 * netif_oper_up - test if device is operational
2867 * @dev: network device
2868 *
2869 * Check if carrier is operational
2870 */
David S. Miller4d295152012-03-07 21:02:35 -05002871static inline bool netif_oper_up(const struct net_device *dev)
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08002872{
Stefan Rompfb00055a2006-03-20 17:09:11 -08002873 return (dev->operstate == IF_OPER_UP ||
2874 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2875}
2876
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002877/**
2878 * netif_device_present - is device available or removed
2879 * @dev: network device
2880 *
2881 * Check if device has not been removed from system.
2882 */
David S. Miller4d295152012-03-07 21:02:35 -05002883static inline bool netif_device_present(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884{
2885 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2886}
2887
Joe Perchesf629d202013-09-26 14:48:15 -07002888void netif_device_detach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889
Joe Perchesf629d202013-09-26 14:48:15 -07002890void netif_device_attach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891
2892/*
2893 * Network interface message level settings
2894 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895
2896enum {
2897 NETIF_MSG_DRV = 0x0001,
2898 NETIF_MSG_PROBE = 0x0002,
2899 NETIF_MSG_LINK = 0x0004,
2900 NETIF_MSG_TIMER = 0x0008,
2901 NETIF_MSG_IFDOWN = 0x0010,
2902 NETIF_MSG_IFUP = 0x0020,
2903 NETIF_MSG_RX_ERR = 0x0040,
2904 NETIF_MSG_TX_ERR = 0x0080,
2905 NETIF_MSG_TX_QUEUED = 0x0100,
2906 NETIF_MSG_INTR = 0x0200,
2907 NETIF_MSG_TX_DONE = 0x0400,
2908 NETIF_MSG_RX_STATUS = 0x0800,
2909 NETIF_MSG_PKTDATA = 0x1000,
2910 NETIF_MSG_HW = 0x2000,
2911 NETIF_MSG_WOL = 0x4000,
2912};
2913
2914#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2915#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2916#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2917#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2918#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2919#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2920#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2921#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2922#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2923#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2924#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2925#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2926#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2927#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2928#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2929
2930static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2931{
2932 /* use default */
2933 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2934 return default_msg_enable_bits;
2935 if (debug_value == 0) /* no output */
2936 return 0;
2937 /* set low N bits */
2938 return (1 << debug_value) - 1;
2939}
2940
David S. Millerc773e842008-07-08 23:13:53 -07002941static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
Herbert Xu932ff272006-06-09 12:20:56 -07002942{
David S. Millerc773e842008-07-08 23:13:53 -07002943 spin_lock(&txq->_xmit_lock);
2944 txq->xmit_lock_owner = cpu;
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07002945}
2946
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002947static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2948{
2949 spin_lock_bh(&txq->_xmit_lock);
2950 txq->xmit_lock_owner = smp_processor_id();
2951}
2952
David S. Miller4d295152012-03-07 21:02:35 -05002953static inline bool __netif_tx_trylock(struct netdev_queue *txq)
David S. Millerc773e842008-07-08 23:13:53 -07002954{
David S. Miller4d295152012-03-07 21:02:35 -05002955 bool ok = spin_trylock(&txq->_xmit_lock);
David S. Millerc773e842008-07-08 23:13:53 -07002956 if (likely(ok))
2957 txq->xmit_lock_owner = smp_processor_id();
2958 return ok;
Herbert Xu932ff272006-06-09 12:20:56 -07002959}
2960
David S. Millerc773e842008-07-08 23:13:53 -07002961static inline void __netif_tx_unlock(struct netdev_queue *txq)
2962{
2963 txq->xmit_lock_owner = -1;
2964 spin_unlock(&txq->_xmit_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07002965}
2966
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002967static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2968{
2969 txq->xmit_lock_owner = -1;
2970 spin_unlock_bh(&txq->_xmit_lock);
2971}
2972
Eric Dumazet08baf562009-05-25 22:58:01 -07002973static inline void txq_trans_update(struct netdev_queue *txq)
2974{
2975 if (txq->xmit_lock_owner != -1)
2976 txq->trans_start = jiffies;
2977}
2978
David S. Millerc3f26a22008-07-31 16:58:50 -07002979/**
2980 * netif_tx_lock - grab network device transmit lock
2981 * @dev: network device
David S. Millerc3f26a22008-07-31 16:58:50 -07002982 *
2983 * Get network device transmit lock
2984 */
2985static inline void netif_tx_lock(struct net_device *dev)
2986{
2987 unsigned int i;
2988 int cpu;
2989
2990 spin_lock(&dev->tx_global_lock);
2991 cpu = smp_processor_id();
2992 for (i = 0; i < dev->num_tx_queues; i++) {
2993 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2994
2995 /* We are the only thread of execution doing a
2996 * freeze, but we have to grab the _xmit_lock in
2997 * order to synchronize with threads which are in
2998 * the ->hard_start_xmit() handler and already
2999 * checked the frozen bit.
3000 */
3001 __netif_tx_lock(txq, cpu);
3002 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3003 __netif_tx_unlock(txq);
3004 }
3005}
3006
3007static inline void netif_tx_lock_bh(struct net_device *dev)
3008{
3009 local_bh_disable();
3010 netif_tx_lock(dev);
3011}
3012
Herbert Xu932ff272006-06-09 12:20:56 -07003013static inline void netif_tx_unlock(struct net_device *dev)
3014{
David S. Millere8a04642008-07-17 00:34:19 -07003015 unsigned int i;
David S. Millerc773e842008-07-08 23:13:53 -07003016
David S. Millere8a04642008-07-17 00:34:19 -07003017 for (i = 0; i < dev->num_tx_queues; i++) {
3018 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millere8a04642008-07-17 00:34:19 -07003019
David S. Millerc3f26a22008-07-31 16:58:50 -07003020 /* No need to grab the _xmit_lock here. If the
3021 * queue is not stopped for another reason, we
3022 * force a schedule.
3023 */
3024 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003025 netif_schedule_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07003026 }
3027 spin_unlock(&dev->tx_global_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07003028}
3029
3030static inline void netif_tx_unlock_bh(struct net_device *dev)
3031{
David S. Millere8a04642008-07-17 00:34:19 -07003032 netif_tx_unlock(dev);
3033 local_bh_enable();
Herbert Xu932ff272006-06-09 12:20:56 -07003034}
3035
David S. Millerc773e842008-07-08 23:13:53 -07003036#define HARD_TX_LOCK(dev, txq, cpu) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003037 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07003038 __netif_tx_lock(txq, cpu); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003039 } \
3040}
3041
Eric W. Biederman5efeac42014-03-27 15:42:20 -07003042#define HARD_TX_TRYLOCK(dev, txq) \
3043 (((dev->features & NETIF_F_LLTX) == 0) ? \
3044 __netif_tx_trylock(txq) : \
3045 true )
3046
David S. Millerc773e842008-07-08 23:13:53 -07003047#define HARD_TX_UNLOCK(dev, txq) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003048 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07003049 __netif_tx_unlock(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003050 } \
3051}
3052
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053static inline void netif_tx_disable(struct net_device *dev)
3054{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003055 unsigned int i;
David S. Millerc3f26a22008-07-31 16:58:50 -07003056 int cpu;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003057
David S. Millerc3f26a22008-07-31 16:58:50 -07003058 local_bh_disable();
3059 cpu = smp_processor_id();
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003060 for (i = 0; i < dev->num_tx_queues; i++) {
3061 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millerc3f26a22008-07-31 16:58:50 -07003062
3063 __netif_tx_lock(txq, cpu);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003064 netif_tx_stop_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07003065 __netif_tx_unlock(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003066 }
David S. Millerc3f26a22008-07-31 16:58:50 -07003067 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068}
3069
David S. Millere308a5d2008-07-15 00:13:44 -07003070static inline void netif_addr_lock(struct net_device *dev)
3071{
3072 spin_lock(&dev->addr_list_lock);
3073}
3074
Jiri Pirko2429f7a2012-01-09 06:36:54 +00003075static inline void netif_addr_lock_nested(struct net_device *dev)
3076{
Vlad Yasevich25175ba2014-05-16 17:04:54 -04003077 int subclass = SINGLE_DEPTH_NESTING;
3078
3079 if (dev->netdev_ops->ndo_get_lock_subclass)
3080 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3081
3082 spin_lock_nested(&dev->addr_list_lock, subclass);
Jiri Pirko2429f7a2012-01-09 06:36:54 +00003083}
3084
David S. Millere308a5d2008-07-15 00:13:44 -07003085static inline void netif_addr_lock_bh(struct net_device *dev)
3086{
3087 spin_lock_bh(&dev->addr_list_lock);
3088}
3089
3090static inline void netif_addr_unlock(struct net_device *dev)
3091{
3092 spin_unlock(&dev->addr_list_lock);
3093}
3094
3095static inline void netif_addr_unlock_bh(struct net_device *dev)
3096{
3097 spin_unlock_bh(&dev->addr_list_lock);
3098}
3099
Jiri Pirkof001fde2009-05-05 02:48:28 +00003100/*
Jiri Pirko31278e72009-06-17 01:12:19 +00003101 * dev_addrs walker. Should be used only for read access. Call with
Jiri Pirkof001fde2009-05-05 02:48:28 +00003102 * rcu_read_lock held.
3103 */
3104#define for_each_dev_addr(dev, ha) \
Jiri Pirko31278e72009-06-17 01:12:19 +00003105 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003106
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107/* These functions live elsewhere (drivers/net/net_init.c, but related) */
3108
Joe Perchesf629d202013-09-26 14:48:15 -07003109void ether_setup(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110
3111/* Support for loadable net-drivers */
Joe Perchesf629d202013-09-26 14:48:15 -07003112struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02003113 unsigned char name_assign_type,
Joe Perchesf629d202013-09-26 14:48:15 -07003114 void (*setup)(struct net_device *),
3115 unsigned int txqs, unsigned int rxqs);
Tom Gundersenc835a672014-07-14 16:37:24 +02003116#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3117 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
Tom Herbert36909ea2011-01-09 19:36:31 +00003118
Tom Gundersenc835a672014-07-14 16:37:24 +02003119#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
3120 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
3121 count)
Tom Herbert36909ea2011-01-09 19:36:31 +00003122
Joe Perchesf629d202013-09-26 14:48:15 -07003123int register_netdev(struct net_device *dev);
3124void unregister_netdev(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003125
Jiri Pirko22bedad32010-04-01 21:22:57 +00003126/* General hardware address lists handling functions */
Joe Perchesf629d202013-09-26 14:48:15 -07003127int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3128 struct netdev_hw_addr_list *from_list, int addr_len);
3129void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3130 struct netdev_hw_addr_list *from_list, int addr_len);
Alexander Duyck670e5b82014-05-28 18:44:46 -07003131int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
3132 struct net_device *dev,
3133 int (*sync)(struct net_device *, const unsigned char *),
3134 int (*unsync)(struct net_device *,
3135 const unsigned char *));
3136void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
3137 struct net_device *dev,
3138 int (*unsync)(struct net_device *,
3139 const unsigned char *));
Joe Perchesf629d202013-09-26 14:48:15 -07003140void __hw_addr_init(struct netdev_hw_addr_list *list);
Jiri Pirko22bedad32010-04-01 21:22:57 +00003141
Jiri Pirkof001fde2009-05-05 02:48:28 +00003142/* Functions used for device addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07003143int dev_addr_add(struct net_device *dev, const unsigned char *addr,
3144 unsigned char addr_type);
3145int dev_addr_del(struct net_device *dev, const unsigned char *addr,
3146 unsigned char addr_type);
Joe Perchesf629d202013-09-26 14:48:15 -07003147void dev_addr_flush(struct net_device *dev);
3148int dev_addr_init(struct net_device *dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00003149
3150/* Functions used for unicast addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07003151int dev_uc_add(struct net_device *dev, const unsigned char *addr);
3152int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
3153int dev_uc_del(struct net_device *dev, const unsigned char *addr);
3154int dev_uc_sync(struct net_device *to, struct net_device *from);
3155int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
3156void dev_uc_unsync(struct net_device *to, struct net_device *from);
3157void dev_uc_flush(struct net_device *dev);
3158void dev_uc_init(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003159
Alexander Duyck670e5b82014-05-28 18:44:46 -07003160/**
3161 * __dev_uc_sync - Synchonize device's unicast list
3162 * @dev: device to sync
3163 * @sync: function to call if address should be added
3164 * @unsync: function to call if address should be removed
3165 *
3166 * Add newly added addresses to the interface, and release
3167 * addresses that have been deleted.
3168 **/
3169static inline int __dev_uc_sync(struct net_device *dev,
3170 int (*sync)(struct net_device *,
3171 const unsigned char *),
3172 int (*unsync)(struct net_device *,
3173 const unsigned char *))
3174{
3175 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
3176}
3177
3178/**
Masanari Iidae793c0f2014-09-04 23:44:36 +09003179 * __dev_uc_unsync - Remove synchronized addresses from device
Alexander Duyck670e5b82014-05-28 18:44:46 -07003180 * @dev: device to sync
3181 * @unsync: function to call if address should be removed
3182 *
3183 * Remove all addresses that were added to the device by dev_uc_sync().
3184 **/
3185static inline void __dev_uc_unsync(struct net_device *dev,
3186 int (*unsync)(struct net_device *,
3187 const unsigned char *))
3188{
3189 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
3190}
3191
Jiri Pirko22bedad32010-04-01 21:22:57 +00003192/* Functions used for multicast addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07003193int dev_mc_add(struct net_device *dev, const unsigned char *addr);
3194int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
3195int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
3196int dev_mc_del(struct net_device *dev, const unsigned char *addr);
3197int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
3198int dev_mc_sync(struct net_device *to, struct net_device *from);
3199int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
3200void dev_mc_unsync(struct net_device *to, struct net_device *from);
3201void dev_mc_flush(struct net_device *dev);
3202void dev_mc_init(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08003203
Alexander Duyck670e5b82014-05-28 18:44:46 -07003204/**
3205 * __dev_mc_sync - Synchonize device's multicast list
3206 * @dev: device to sync
3207 * @sync: function to call if address should be added
3208 * @unsync: function to call if address should be removed
3209 *
3210 * Add newly added addresses to the interface, and release
3211 * addresses that have been deleted.
3212 **/
3213static inline int __dev_mc_sync(struct net_device *dev,
3214 int (*sync)(struct net_device *,
3215 const unsigned char *),
3216 int (*unsync)(struct net_device *,
3217 const unsigned char *))
3218{
3219 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
3220}
3221
3222/**
Masanari Iidae793c0f2014-09-04 23:44:36 +09003223 * __dev_mc_unsync - Remove synchronized addresses from device
Alexander Duyck670e5b82014-05-28 18:44:46 -07003224 * @dev: device to sync
3225 * @unsync: function to call if address should be removed
3226 *
3227 * Remove all addresses that were added to the device by dev_mc_sync().
3228 **/
3229static inline void __dev_mc_unsync(struct net_device *dev,
3230 int (*unsync)(struct net_device *,
3231 const unsigned char *))
3232{
3233 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
3234}
3235
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236/* Functions used for secondary unicast and multicast support */
Joe Perchesf629d202013-09-26 14:48:15 -07003237void dev_set_rx_mode(struct net_device *dev);
3238void __dev_set_rx_mode(struct net_device *dev);
3239int dev_set_promiscuity(struct net_device *dev, int inc);
3240int dev_set_allmulti(struct net_device *dev, int inc);
3241void netdev_state_change(struct net_device *dev);
3242void netdev_notify_peers(struct net_device *dev);
3243void netdev_features_change(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08003244/* Load a device via the kmod */
Joe Perchesf629d202013-09-26 14:48:15 -07003245void dev_load(struct net *net, const char *name);
3246struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
3247 struct rtnl_link_stats64 *storage);
3248void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3249 const struct net_device_stats *netdev_stats);
Herbert Xufb286bb2005-11-10 13:01:24 -08003250
3251extern int netdev_max_backlog;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003252extern int netdev_tstamp_prequeue;
Herbert Xufb286bb2005-11-10 13:01:24 -08003253extern int weight_p;
Eric Dumazet0a148422011-04-20 09:27:32 +00003254extern int bpf_jit_enable;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00003255
Joe Perchesf629d202013-09-26 14:48:15 -07003256bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
Vlad Yasevich44a40852014-05-16 17:20:38 -04003257struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3258 struct list_head **iter);
Joe Perchesf629d202013-09-26 14:48:15 -07003259struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3260 struct list_head **iter);
Veaceslav Falico8b5be852013-08-28 23:25:08 +02003261
3262/* iterate through upper list, must be called under RCU read lock */
Vlad Yasevich44a40852014-05-16 17:20:38 -04003263#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3264 for (iter = &(dev)->adj_list.upper, \
3265 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3266 updev; \
3267 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3268
3269/* iterate through upper list, must be called under RCU read lock */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02003270#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3271 for (iter = &(dev)->all_adj_list.upper, \
3272 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
3273 updev; \
3274 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
Veaceslav Falico8b5be852013-08-28 23:25:08 +02003275
Joe Perchesf629d202013-09-26 14:48:15 -07003276void *netdev_lower_get_next_private(struct net_device *dev,
3277 struct list_head **iter);
3278void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3279 struct list_head **iter);
Veaceslav Falico31088a12013-09-25 09:20:12 +02003280
3281#define netdev_for_each_lower_private(dev, priv, iter) \
3282 for (iter = (dev)->adj_list.lower.next, \
3283 priv = netdev_lower_get_next_private(dev, &(iter)); \
3284 priv; \
3285 priv = netdev_lower_get_next_private(dev, &(iter)))
3286
3287#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
3288 for (iter = &(dev)->adj_list.lower, \
3289 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
3290 priv; \
3291 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3292
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04003293void *netdev_lower_get_next(struct net_device *dev,
3294 struct list_head **iter);
3295#define netdev_for_each_lower_dev(dev, ldev, iter) \
3296 for (iter = &(dev)->adj_list.lower, \
3297 ldev = netdev_lower_get_next(dev, &(iter)); \
3298 ldev; \
3299 ldev = netdev_lower_get_next(dev, &(iter)))
3300
Joe Perchesf629d202013-09-26 14:48:15 -07003301void *netdev_adjacent_get_private(struct list_head *adj_list);
dingtianhonge001bfa2013-12-13 10:19:55 +08003302void *netdev_lower_get_first_private_rcu(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07003303struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
3304struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
3305int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
3306int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko9ff162a2013-01-03 22:48:49 +00003307 struct net_device *upper_dev);
Joe Perchesf629d202013-09-26 14:48:15 -07003308int netdev_master_upper_dev_link_private(struct net_device *dev,
3309 struct net_device *upper_dev,
3310 void *private);
3311void netdev_upper_dev_unlink(struct net_device *dev,
3312 struct net_device *upper_dev);
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01003313void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
Joe Perchesf629d202013-09-26 14:48:15 -07003314void *netdev_lower_dev_get_private(struct net_device *dev,
3315 struct net_device *lower_dev);
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04003316int dev_get_nest_level(struct net_device *dev,
3317 bool (*type_check)(struct net_device *dev));
Joe Perchesf629d202013-09-26 14:48:15 -07003318int skb_checksum_help(struct sk_buff *skb);
3319struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3320 netdev_features_t features, bool tx_path);
3321struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3322 netdev_features_t features);
Cong Wang12b00042013-02-05 16:36:38 +00003323
3324static inline
3325struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3326{
3327 return __skb_gso_segment(skb, features, true);
3328}
Vlad Yasevich53d64712014-03-27 17:26:18 -04003329__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003330
3331static inline bool can_checksum_protocol(netdev_features_t features,
3332 __be16 protocol)
3333{
3334 return ((features & NETIF_F_GEN_CSUM) ||
3335 ((features & NETIF_F_V4_CSUM) &&
3336 protocol == htons(ETH_P_IP)) ||
3337 ((features & NETIF_F_V6_CSUM) &&
3338 protocol == htons(ETH_P_IPV6)) ||
3339 ((features & NETIF_F_FCOE_CRC) &&
3340 protocol == htons(ETH_P_FCOE)));
3341}
Cong Wang12b00042013-02-05 16:36:38 +00003342
Herbert Xufb286bb2005-11-10 13:01:24 -08003343#ifdef CONFIG_BUG
Joe Perchesf629d202013-09-26 14:48:15 -07003344void netdev_rx_csum_fault(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08003345#else
3346static inline void netdev_rx_csum_fault(struct net_device *dev)
3347{
3348}
3349#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350/* rx skb timestamps */
Joe Perchesf629d202013-09-26 14:48:15 -07003351void net_enable_timestamp(void);
3352void net_disable_timestamp(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03003354#ifdef CONFIG_PROC_FS
Joe Perchesf629d202013-09-26 14:48:15 -07003355int __init dev_proc_init(void);
Cong Wang900ff8c2013-02-18 19:20:33 +00003356#else
3357#define dev_proc_init() 0
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03003358#endif
3359
Linus Torvalds42a2d922013-11-13 17:40:34 +09003360int netdev_class_create_file_ns(struct class_attribute *class_attr,
3361 const void *ns);
3362void netdev_class_remove_file_ns(struct class_attribute *class_attr,
3363 const void *ns);
Tejun Heo58292cbe2013-09-11 22:29:04 -04003364
3365static inline int netdev_class_create_file(struct class_attribute *class_attr)
3366{
3367 return netdev_class_create_file_ns(class_attr, NULL);
3368}
3369
3370static inline void netdev_class_remove_file(struct class_attribute *class_attr)
3371{
3372 netdev_class_remove_file_ns(class_attr, NULL);
3373}
Jay Vosburghb8a97872008-06-13 18:12:04 -07003374
Johannes Berg04600792010-08-05 17:45:15 +02003375extern struct kobj_ns_type_operations net_ns_type_operations;
3376
Joe Perchesf629d202013-09-26 14:48:15 -07003377const char *netdev_drivername(const struct net_device *dev);
Arjan van de Ven6579e572008-07-21 13:31:48 -07003378
Joe Perchesf629d202013-09-26 14:48:15 -07003379void linkwatch_run_queue(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03003380
Michal Kubečekda081432014-05-20 08:29:25 +02003381static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
3382 netdev_features_t f2)
3383{
3384 if (f1 & NETIF_F_GEN_CSUM)
3385 f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3386 if (f2 & NETIF_F_GEN_CSUM)
3387 f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3388 f1 &= f2;
3389 if (f1 & NETIF_F_GEN_CSUM)
3390 f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
3391
3392 return f1;
3393}
3394
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003395static inline netdev_features_t netdev_get_wanted_features(
3396 struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00003397{
3398 return (dev->features & ~dev->hw_features) | dev->wanted_features;
3399}
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003400netdev_features_t netdev_increment_features(netdev_features_t all,
3401 netdev_features_t one, netdev_features_t mask);
Eric Dumazetb0ce3502013-05-16 07:34:53 +00003402
3403/* Allow TSO being used on stacked device :
3404 * Performing the GSO segmentation before last device
3405 * is a performance improvement.
3406 */
3407static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
3408 netdev_features_t mask)
3409{
3410 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
3411}
3412
Michał Mirosław6cb6a272011-04-02 22:48:47 -07003413int __netdev_update_features(struct net_device *dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00003414void netdev_update_features(struct net_device *dev);
Michał Mirosławafe12cc2011-05-07 03:22:17 +00003415void netdev_change_features(struct net_device *dev);
Herbert Xu7f353bf2007-08-10 15:47:58 -07003416
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08003417void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3418 struct net_device *dev);
3419
Florian Westphalc1e756b2014-05-05 15:00:44 +02003420netdev_features_t netif_skb_features(struct sk_buff *skb);
Jesse Gross58e998c2010-10-29 12:14:55 +00003421
David S. Miller4d295152012-03-07 21:02:35 -05003422static inline bool net_gso_ok(netdev_features_t features, int gso_type)
Herbert Xubcd76112006-06-30 13:36:35 -07003423{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003424 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
Michał Mirosław0345e182011-11-16 14:05:33 +00003425
3426 /* check flags correspondence */
3427 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
3428 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
3429 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
3430 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
3431 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
3432 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
Tom Herbert4b282522014-06-14 23:23:52 -07003433 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
3434 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
3435 BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
3436 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3437 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3438 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3439 BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
Michał Mirosław0345e182011-11-16 14:05:33 +00003440
Herbert Xubcd76112006-06-30 13:36:35 -07003441 return (features & feature) == feature;
3442}
3443
David S. Miller4d295152012-03-07 21:02:35 -05003444static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
Herbert Xu576a30e2006-06-27 13:22:38 -07003445{
Herbert Xu278b2512009-06-03 21:20:51 -07003446 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
David S. Miller21dc3302010-08-23 00:13:46 -07003447 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
Herbert Xu576a30e2006-06-27 13:22:38 -07003448}
3449
David S. Miller4d295152012-03-07 21:02:35 -05003450static inline bool netif_needs_gso(struct sk_buff *skb,
3451 netdev_features_t features)
Herbert Xu79671682006-06-22 02:40:14 -07003452{
Jesse Grossfc741212011-01-09 06:23:32 +00003453 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
Yi Zoucdbee742012-03-16 23:08:11 +00003454 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
3455 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
Herbert Xu79671682006-06-22 02:40:14 -07003456}
3457
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07003458static inline void netif_set_gso_max_size(struct net_device *dev,
3459 unsigned int size)
3460{
3461 dev->gso_max_size = size;
3462}
3463
Wei-Chun Chao7a7ffba2013-12-26 13:10:22 -08003464static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
3465 int pulled_hlen, u16 mac_offset,
3466 int mac_len)
3467{
3468 skb->protocol = protocol;
3469 skb->encapsulation = 1;
3470 skb_push(skb, pulled_hlen);
3471 skb_reset_transport_header(skb);
3472 skb->mac_header = mac_offset;
3473 skb->network_header = skb->mac_header + mac_len;
3474 skb->mac_len = mac_len;
3475}
3476
John Fastabenda6cc0cf2013-11-06 09:54:46 -08003477static inline bool netif_is_macvlan(struct net_device *dev)
3478{
3479 return dev->priv_flags & IFF_MACVLAN;
3480}
3481
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00003482static inline bool netif_is_bond_master(struct net_device *dev)
3483{
3484 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
3485}
3486
David S. Miller4d295152012-03-07 21:02:35 -05003487static inline bool netif_is_bond_slave(struct net_device *dev)
Jiri Pirko1765a572011-02-12 06:48:36 +00003488{
3489 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
3490}
3491
Ben Greear3bdc0eb2012-02-11 15:39:30 +00003492static inline bool netif_supports_nofcs(struct net_device *dev)
3493{
3494 return dev->priv_flags & IFF_SUPP_NOFCS;
3495}
3496
Eric W. Biederman505d4f72008-11-07 22:54:20 -08003497extern struct pernet_operations __net_initdata loopback_net_ops;
Patrick McHardyb1b67dd2009-04-20 04:49:28 +00003498
Joe Perches571ba422010-02-09 11:49:47 +00003499/* Logging, debugging and troubleshooting/diagnostic helpers. */
3500
3501/* netdev_printk helpers, similar to dev_printk */
3502
3503static inline const char *netdev_name(const struct net_device *dev)
3504{
Veaceslav Falicoc6f854d2014-07-17 19:46:09 +02003505 if (!dev->name[0] || strchr(dev->name, '%'))
3506 return "(unnamed net_device)";
Joe Perches571ba422010-02-09 11:49:47 +00003507 return dev->name;
3508}
3509
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02003510static inline const char *netdev_reg_state(const struct net_device *dev)
3511{
3512 switch (dev->reg_state) {
3513 case NETREG_UNINITIALIZED: return " (uninitialized)";
3514 case NETREG_REGISTERED: return "";
3515 case NETREG_UNREGISTERING: return " (unregistering)";
3516 case NETREG_UNREGISTERED: return " (unregistered)";
3517 case NETREG_RELEASED: return " (released)";
3518 case NETREG_DUMMY: return " (dummy)";
3519 }
3520
3521 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
3522 return " (unknown)";
3523}
3524
Joe Perchesf629d202013-09-26 14:48:15 -07003525__printf(3, 4)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003526int netdev_printk(const char *level, const struct net_device *dev,
3527 const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003528__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003529int netdev_emerg(const struct net_device *dev, const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003530__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003531int netdev_alert(const struct net_device *dev, const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003532__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003533int netdev_crit(const struct net_device *dev, const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003534__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003535int netdev_err(const struct net_device *dev, const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003536__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003537int netdev_warn(const struct net_device *dev, const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003538__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003539int netdev_notice(const struct net_device *dev, const char *format, ...);
Joe Perchesf629d202013-09-26 14:48:15 -07003540__printf(2, 3)
Joe Perchesb9075fa2011-10-31 17:11:33 -07003541int netdev_info(const struct net_device *dev, const char *format, ...);
Joe Perches571ba422010-02-09 11:49:47 +00003542
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03003543#define MODULE_ALIAS_NETDEV(device) \
3544 MODULE_ALIAS("netdev-" device)
3545
Jim Cromieb558c962011-12-19 17:11:18 -05003546#if defined(CONFIG_DYNAMIC_DEBUG)
Joe Perches571ba422010-02-09 11:49:47 +00003547#define netdev_dbg(__dev, format, args...) \
3548do { \
Jason Baronffa10cb2011-08-11 14:36:48 -04003549 dynamic_netdev_dbg(__dev, format, ##args); \
Joe Perches571ba422010-02-09 11:49:47 +00003550} while (0)
Jim Cromieb558c962011-12-19 17:11:18 -05003551#elif defined(DEBUG)
3552#define netdev_dbg(__dev, format, args...) \
3553 netdev_printk(KERN_DEBUG, __dev, format, ##args)
Joe Perches571ba422010-02-09 11:49:47 +00003554#else
3555#define netdev_dbg(__dev, format, args...) \
3556({ \
3557 if (0) \
3558 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
3559 0; \
3560})
3561#endif
3562
3563#if defined(VERBOSE_DEBUG)
3564#define netdev_vdbg netdev_dbg
3565#else
3566
3567#define netdev_vdbg(dev, format, args...) \
3568({ \
3569 if (0) \
3570 netdev_printk(KERN_DEBUG, dev, format, ##args); \
3571 0; \
3572})
3573#endif
3574
3575/*
3576 * netdev_WARN() acts like dev_printk(), but with the key difference
3577 * of using a WARN/WARN_ON to get the message out, including the
3578 * file/line information and a backtrace.
3579 */
3580#define netdev_WARN(dev, format, args...) \
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02003581 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
3582 netdev_reg_state(dev), ##args)
Joe Perches571ba422010-02-09 11:49:47 +00003583
Joe Perchesb3d95c52010-02-09 11:49:49 +00003584/* netif printk helpers, similar to netdev_printk */
3585
3586#define netif_printk(priv, type, level, dev, fmt, args...) \
3587do { \
3588 if (netif_msg_##type(priv)) \
3589 netdev_printk(level, (dev), fmt, ##args); \
3590} while (0)
3591
Joe Perchesf45f4322010-06-27 01:02:36 +00003592#define netif_level(level, priv, type, dev, fmt, args...) \
3593do { \
3594 if (netif_msg_##type(priv)) \
3595 netdev_##level(dev, fmt, ##args); \
3596} while (0)
3597
Joe Perchesb3d95c52010-02-09 11:49:49 +00003598#define netif_emerg(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003599 netif_level(emerg, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003600#define netif_alert(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003601 netif_level(alert, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003602#define netif_crit(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003603 netif_level(crit, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003604#define netif_err(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003605 netif_level(err, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003606#define netif_warn(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003607 netif_level(warn, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003608#define netif_notice(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003609 netif_level(notice, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003610#define netif_info(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003611 netif_level(info, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003612
Joe Perches0053ea92012-05-30 07:43:34 +00003613#if defined(CONFIG_DYNAMIC_DEBUG)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003614#define netif_dbg(priv, type, netdev, format, args...) \
3615do { \
3616 if (netif_msg_##type(priv)) \
Jason Baronb5fb0a02011-08-11 14:36:53 -04003617 dynamic_netdev_dbg(netdev, format, ##args); \
Joe Perchesb3d95c52010-02-09 11:49:49 +00003618} while (0)
Joe Perches0053ea92012-05-30 07:43:34 +00003619#elif defined(DEBUG)
3620#define netif_dbg(priv, type, dev, format, args...) \
3621 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003622#else
3623#define netif_dbg(priv, type, dev, format, args...) \
3624({ \
3625 if (0) \
3626 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3627 0; \
3628})
3629#endif
3630
3631#if defined(VERBOSE_DEBUG)
Ben Hutchingsbcfcc452010-07-02 07:08:44 +00003632#define netif_vdbg netif_dbg
Joe Perchesb3d95c52010-02-09 11:49:49 +00003633#else
3634#define netif_vdbg(priv, type, dev, format, args...) \
3635({ \
3636 if (0) \
Ben Hutchingsa4ed89c2010-05-18 06:56:32 +00003637 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
Joe Perchesb3d95c52010-02-09 11:49:49 +00003638 0; \
3639})
3640#endif
Joe Perches571ba422010-02-09 11:49:47 +00003641
Cong Wang900ff8c2013-02-18 19:20:33 +00003642/*
3643 * The list of packet types we will receive (as opposed to discard)
3644 * and the routines to invoke.
3645 *
3646 * Why 16. Because with 16 the only overlap we get on a hash of the
3647 * low nibble of the protocol value is RARP/SNAP/X.25.
3648 *
3649 * NOTE: That is no longer true with the addition of VLAN tags. Not
3650 * sure which should go first, but I bet it won't make much
3651 * difference if we are running VLANs. The good news is that
3652 * this protocol won't be in the list unless compiled in, so
3653 * the average user (w/out VLANs) will not be adversely affected.
3654 * --BLG
3655 *
3656 * 0800 IP
3657 * 8100 802.1Q VLAN
3658 * 0001 802.3
3659 * 0002 AX.25
3660 * 0004 802.2
3661 * 8035 RARP
3662 * 0005 SNAP
3663 * 0805 X.25
3664 * 0806 ARP
3665 * 8137 IPX
3666 * 0009 Localtalk
3667 * 86DD IPv6
3668 */
3669#define PTYPE_HASH_SIZE (16)
3670#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
3671
Jiri Pirko385a1542009-05-27 15:48:07 -07003672#endif /* _LINUX_NETDEVICE_H */