/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
			/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV	/* feature macro: alloc_xxxdev
				   functions are available. */
#define HAVE_FREE_NETDEV	/* free_netdev() */
#define HAVE_NETDEV_PRIV	/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
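
/*
 * Illustrative note (not part of the original header): callers that only
 * care about whether the packet entered the stack typically fold the
 * congestion hint away with net_xmit_eval(), e.g.:
 *
 *	err = net_xmit_eval(dev_queue_xmit(skb));
 *
 * so NET_XMIT_CN is reported as success while real drops keep their code.
 */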

#endif

#define MAX_ADDR_LEN	32	/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */

#ifdef __KERNEL__

/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif /* __KERNEL__ */

/*
 * Network device statistics. Akin to the 2.0 ether stats but
 * with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;	/* total packets received	*/
	unsigned long	tx_packets;	/* total packets transmitted	*/
	unsigned long	rx_bytes;	/* total bytes received		*/
	unsigned long	tx_bytes;	/* total bytes transmitted	*/
	unsigned long	rx_errors;	/* bad packets received		*/
	unsigned long	tx_errors;	/* packet transmit problems	*/
	unsigned long	rx_dropped;	/* no space in linux buffers	*/
	unsigned long	tx_dropped;	/* no space available in linux	*/
	unsigned long	multicast;	/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* received pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* received frame alignment error */
	unsigned long	rx_fifo_errors;		/* receiver fifo overrun	*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
	int			refcount;
	bool			synced;
	struct rcu_head		rcu_head;
};

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry		*/
	atomic_t	hh_refcnt;	/* number of users	*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

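/*
 * Illustrative sketch (not part of the original header): a protocol
 * building an outgoing frame reserves link-layer headroom with
 * LL_RESERVED_SPACE() so dev_hard_header() can prepend without
 * reallocating:
 *
 *	struct sk_buff *skb = alloc_skb(len + LL_ALLOCATED_SPACE(dev),
 *					GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *
 * LL_ALLOCATED_SPACE() sizes the allocation because it also covers
 * needed_tailroom; only the headroom is reserved up front.
 */
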
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

enum {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure that only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule the NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
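
/*
 * Illustrative sketch (not part of the original header): the usual caller
 * of napi_schedule() is a device interrupt handler, which masks its RX
 * interrupt and defers the real work to the poll routine:
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;   // hypothetical driver private struct
 *
 *		example_disable_rx_irq(priv);       // hypothetical hardware helper
 *		napi_schedule(&priv->napi);         // poll() then runs in softirq context
 *		return IRQ_HANDLED;
 *	}
 *
 * napi_schedule_prep() makes the schedule idempotent, so a second
 * interrupt racing in before poll() runs is harmless.
 */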

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
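
/*
 * Illustrative sketch (not part of the original header): a minimal NAPI
 * poll routine, registered earlier with netif_napi_add(), processes at
 * most @budget packets and calls napi_complete() only when the ring ran
 * dry before the budget was exhausted:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv =             // hypothetical driver private struct
 *			container_of(napi, struct example_priv, napi);
 *		int work = example_rx_clean(priv, budget);  // hypothetical RX helper
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			example_enable_rx_irq(priv);    // hypothetical hardware helper
 *		}
 *		return work;
 *	}
 *
 * Returning the full budget keeps the napi_struct on the poll list so
 * the softirq calls it again.
 */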

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t
{
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct Qdisc		*qdisc_sleeping;
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
	unsigned long		tx_bytes;
	unsigned long		tx_packets;
	unsigned long		tx_dropped;
} ____cacheline_aligned_in_smp;


/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when the device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports
 *	multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is
 *	enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *	This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transmission Unit
 *	of a device. If not defined, any request to change the MTU
 *	will return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. If not defined, the counters in dev->stats will
 *	be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *	If the device supports VLAN receive acceleration
 *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 *	when the vlan groups for the device change. Note: grp is NULL
 *	if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	int			(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
#define HAVE_SET_RX_MODE
	void			(*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
#define HAVE_TX_TIMEOUT
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
	void			(*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#endif
};
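
/*
 * Illustrative sketch (not part of the original header): a minimal driver
 * fills in only the hooks it needs and assigns the table before
 * register_netdev(); everything left NULL falls back to generic behaviour
 * (except ndo_start_xmit, which is required):
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_open		= example_open,        // hypothetical driver functions
 *		.ndo_stop		= example_stop,
 *		.ndo_start_xmit		= example_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 *	dev->netdev_ops = &example_netdev_ops;
 */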

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers  */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA | \
				 NETIF_F_FRAGLIST)

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct list_head	uc_list;	/* Secondary unicast mac
						   addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	spinlock_t		addr_list_lock;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct list_head	dev_addr_list;	/* list of device hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	struct dcbnl_rtnl_ops	*dcbnl_ops;
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN	32

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
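
/*
 * Illustrative sketch (not part of the original header): the iterator is
 * handy for applying one operation across every TX queue, e.g. summing
 * the per-queue byte counters:
 *
 *	static void example_sum_tx_bytes(struct net_device *dev,
 *					 struct netdev_queue *txq, void *arg)
 *	{
 *		*(unsigned long *)arg += txq->tx_bytes;   // hypothetical aggregation
 *	}
 *
 *	unsigned long total = 0;
 *	netdev_for_each_tx_queue(dev, example_sum_tx_bytes, &total);
 */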
931
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900932/*
933 * Net namespace inlines
934 */
935static inline
936struct net *dev_net(const struct net_device *dev)
937{
938#ifdef CONFIG_NET_NS
939 return dev->nd_net;
940#else
941 return &init_net;
942#endif
943}
944
945static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -0700946void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900947{
948#ifdef CONFIG_NET_NS
Denis V. Lunevf3005d72008-04-16 02:02:18 -0700949 release_net(dev->nd_net);
950 dev->nd_net = hold_net(net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900951#endif
952}
953
Lennert Buytenhekcf85d082008-10-07 13:45:02 +0000954static inline bool netdev_uses_dsa_tags(struct net_device *dev)
955{
956#ifdef CONFIG_NET_DSA_TAG_DSA
957 if (dev->dsa_ptr != NULL)
958 return dsa_uses_dsa_tags(dev->dsa_ptr);
959#endif
960
961 return 0;
962}
963
Lennert Buytenhek396138f02008-10-07 13:46:07 +0000964static inline bool netdev_uses_trailer_tags(struct net_device *dev)
965{
966#ifdef CONFIG_NET_DSA_TAG_TRAILER
967 if (dev->dsa_ptr != NULL)
968 return dsa_uses_trailer_tags(dev->dsa_ptr);
969#endif
970
971 return 0;
972}
973
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700974/**
975 * netdev_priv - access network device private data
976 * @dev: network device
977 *
978 * Get network device private data
979 */
Patrick McHardy6472ce62007-06-13 12:03:21 -0700980static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +0000982 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983}
984
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985/* Set the sysfs physical device reference for the network logical device
986 * if set prior to registration will cause a symlink during initialization.
987 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -0700988#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
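
/*
 * Illustrative sketch (not part of the original header): netdev_priv()
 * returns the driver-private area that alloc_netdev() co-allocates right
 * after struct net_device, so a probe routine typically looks like:
 *
 *	struct example_priv {                   // hypothetical driver private struct
 *		struct napi_struct napi;
 *		void __iomem *regs;
 *	};
 *
 *	dev = alloc_etherdev(sizeof(struct example_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	SET_NETDEV_DEV(dev, &pdev->dev);        // pdev: the probing PCI device
 *	err = register_netdev(dev);
 */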

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;

	/* Free the skb? */
	int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t			dev_base_lock;	/* Device list lock */


#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

extern int			netdev_boot_setup_check(struct net_device *dev);
extern unsigned long		netdev_boot_base(const char *prefix, int unit);
extern struct net_device	*dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device	*dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device	*__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void			dev_add_pack(struct packet_type *pt);
extern void			dev_remove_pack(struct packet_type *pt);
extern void			__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int			dev_alloc_name(struct net_device *dev, const char *name);
extern int			dev_open(struct net_device *dev);
extern int			dev_close(struct net_device *dev);
extern void			dev_disable_lro(struct net_device *dev);
extern int			dev_queue_xmit(struct sk_buff *skb);
extern int			register_netdevice(struct net_device *dev);
extern void			unregister_netdevice(struct net_device *dev);
extern void			free_netdev(struct net_device *dev);
extern void			synchronize_net(void);
extern int			register_netdevice_notifier(struct notifier_block *nb);
extern int			unregister_netdevice_notifier(struct notifier_block *nb);
extern int			init_dummy_netdev(struct net_device *dev);
extern void			netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int			dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int			netpoll_trap(void);
#endif
extern int			skb_gro_receive(struct sk_buff **head,
						struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
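
/*
 * Illustrative sketch (not part of the original header): a caller that has
 * reserved LL_RESERVED_SPACE() headroom asks the device to prepend its
 * link-layer header, e.g. for an IPv4 frame over Ethernet:
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len) < 0)
 *		goto drop;      // hypothetical error path
 *
 * Passing NULL for @saddr lets the device fill in its own hardware address.
 */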
1186
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1188extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1189static inline int unregister_gifconf(unsigned int family)
1190{
1191 return register_gifconf(family, NULL);
1192}
1193
1194/*
1195 * Incoming packets are placed on per-cpu queues so that
1196 * no locking is needed.
1197 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198struct softnet_data
1199{
David S. Miller37437bb2008-07-16 02:15:04 -07001200 struct Qdisc *output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 struct sk_buff_head input_pkt_queue;
1202 struct list_head poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 struct sk_buff *completion_queue;
1204
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001205 struct napi_struct backlog;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206};
1207
1208DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}
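
/*
 * Illustrative sketch (assumption): a driver's open callback typically
 * enables transmission once its rings are set up. "foo_open" is a
 * hypothetical driver function.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		... allocate rings, request IRQ, enable hardware ...
 *		netif_start_queue(dev);
 *		return 0;
 *	}
 */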

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flow-blocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
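
/*
 * Illustrative sketch (assumption): the classic flow-control pattern.
 * A hypothetical driver stops the queue in its hard_start_xmit when the
 * TX ring fills, and wakes it from the TX-completion interrupt once
 * descriptors are reclaimed; the foo_* helpers are invented names.
 *
 *	static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		... post skb to the hardware TX ring ...
 *		if (foo_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and in the TX-completion handler:
 *
 *	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */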

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
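
/*
 * Illustrative sketch (assumption): a multiqueue driver applies the same
 * per-queue flow-control pattern to each TX ring, using the ring index
 * as the subqueue index. "ring" and the foo_* helpers are hypothetical.
 *
 *	if (foo_tx_ring_full(ring))
 *		netif_stop_subqueue(dev, ring->index);
 *
 * and on TX completion for that ring:
 *
 *	if (__netif_subqueue_stopped(dev, ring->index) &&
 *	    foo_tx_ring_has_room(ring))
 *		netif_wake_subqueue(dev, ring->index);
 */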

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 *	Check if device has multiple transmit queues.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);
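
/*
 * Illustrative sketch (assumption): a TX-completion interrupt handler
 * must not call kfree_skb() directly, so it uses the IRQ-safe variant.
 * "foo_interrupt" and foo_reclaim_tx_skb() are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = foo_reclaim_tx_skb(dev_id)) != NULL)
 *			dev_kfree_skb_irq(skb);
 *		return IRQ_HANDLED;
 *	}
 */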
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
1445#define HAVE_NETIF_RX 1
1446extern int netif_rx(struct sk_buff *skb);
1447extern int netif_rx_ni(struct sk_buff *skb);
1448#define HAVE_NETIF_RECEIVE_SKB 1
1449extern int netif_receive_skb(struct sk_buff *skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08001450extern void napi_gro_flush(struct napi_struct *napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08001451extern int dev_gro_receive(struct napi_struct *napi,
1452 struct sk_buff *skb);
Herbert Xu5d0d9be2009-01-29 14:19:48 +00001453extern int napi_skb_finish(int ret, struct sk_buff *skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08001454extern int napi_gro_receive(struct napi_struct *napi,
1455 struct sk_buff *skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08001456extern void napi_reuse_skb(struct napi_struct *napi,
1457 struct sk_buff *skb);
Herbert Xu76620aa2009-04-16 02:02:07 -07001458extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
Herbert Xu5d0d9be2009-01-29 14:19:48 +00001459extern int napi_frags_finish(struct napi_struct *napi,
1460 struct sk_buff *skb, int ret);
Herbert Xu76620aa2009-04-16 02:02:07 -07001461extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
1462extern int napi_gro_frags(struct napi_struct *napi);
1463
1464static inline void napi_free_frags(struct napi_struct *napi)
1465{
1466 kfree_skb(napi->skb);
1467 napi->skb = NULL;
1468}
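
/*
 * Illustrative sketch (assumption): a NAPI poll routine feeds received
 * packets through GRO instead of calling netif_receive_skb() directly.
 * "foo_poll" and foo_rx_next_skb() are hypothetical names.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_rx_next_skb(napi))) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */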

extern void netif_nit_deliver(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
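
/*
 * Illustrative sketch (assumption): code that stores a struct net_device
 * pointer beyond the context in which it was looked up takes a reference
 * first and drops it when done. "my_ctx" is a hypothetical structure.
 *
 *	dev_hold(dev);
 *	my_ctx->dev = dev;
 *	...
 *	dev_put(my_ctx->dev);
 *	my_ctx->dev = NULL;
 */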

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 *	Check if carrier is present on device.
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863).
 *
 *	The dormant state indicates that the relevant interface is not
 *	actually in a condition to pass packets (i.e., it is not 'up') but is
 *	in a "pending" state, waiting for some external event.  For "on-
 *	demand" interfaces, this new state identifies the situation where the
 *	interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - mark device as not dormant.
 *	@dev: network device
 *
 *	Clear the device's dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
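
/*
 * Illustrative sketch (assumption): an on-demand interface flags itself
 * dormant while waiting for an external event (say, a peer to answer),
 * and clears the state once the link becomes usable:
 *
 *	netif_dormant_on(dev);
 *	... external event arrives ...
 *	netif_dormant_off(dev);
 *	netif_carrier_on(dev);
 */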

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 *	Check if the device's RFC2863 operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
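
/*
 * Illustrative sketch (assumption): a driver maps its "debug" module
 * parameter onto msg_enable at probe time, then tests the per-topic
 * flags before logging. "priv" is a hypothetical private structure.
 *
 *	static int debug = -1;
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */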

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 *	Get network device transmit lock.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
			__netif_schedule(txq->qdisc);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
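
/*
 * Illustrative sketch (assumption): a driver freezes all TX queues with
 * netif_tx_lock_bh() while reconfiguring state that its hard_start_xmit
 * routine also touches, e.g. when changing a filter or ring layout:
 *
 *	netif_tx_lock_bh(dev);
 *	... reprogram hardware safely against concurrent transmits ...
 *	netif_tx_unlock_bh(dev);
 */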

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addr_list walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addr_list, list)
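
/*
 * Illustrative sketch (assumption): walking the device address list
 * under RCU protection:
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		printk(KERN_DEBUG "addr: %pM\n", ha->addr);
 *	rcu_read_unlock();
 */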

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
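
/*
 * Illustrative sketch (assumption): the usual driver lifecycle. A
 * hypothetical Ethernet driver with four TX queues allocates, registers
 * and eventually tears down its net_device:
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return -EIO;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */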

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);

/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr);
extern int		dev_unicast_add(struct net_device *dev, void *addr);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern const struct net_device_stats *dev_get_stats(struct net_device *dev);

extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);

unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_shinfo(skb)->frag_list || (features & NETIF_F_FRAGLIST));
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
					      struct net_device *master)
{
	if (skb->pkt_type == PACKET_HOST) {
		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;

		memcpy(dest, master->dev_addr, ETH_ALEN);
	}
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master) {
		if (master->priv_flags & IFF_MASTER_ARPMON)
			dev->last_rx = jiffies;

		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
			/* Unmangle the destination address.  The local
			 * destination address will always be the one the
			 * master has, which gives the right behaviour on
			 * a bridge.
			 */
			skb_bond_set_mac_by_master(skb, master);
		}

		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
				return 0;

			if (master->priv_flags & IFF_MASTER_ALB) {
				if (skb->pkt_type != PACKET_BROADCAST &&
				    skb->pkt_type != PACKET_MULTICAST)
					return 0;
			}
			if (master->priv_flags & IFF_MASTER_8023AD &&
			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
				return 0;

			return 1;
		}
	}
	return 0;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

static inline int dev_ethtool_get_settings(struct net_device *dev,
					   struct ethtool_cmd *cmd)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;
	return dev->ethtool_ops->get_settings(dev, cmd);
}

static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
		return 0;
	return dev->ethtool_ops->get_rx_csum(dev);
}

static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
		return 0;
	return dev->ethtool_ops->get_flags(dev);
}
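
/*
 * Illustrative sketch (assumption): in-kernel callers query link
 * settings through these wrappers rather than poking ethtool_ops
 * directly, so drivers without the hook are handled gracefully:
 *
 *	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
 *
 *	if (!dev_ethtool_get_settings(dev, &cmd))
 *		... cmd.speed and cmd.duplex are now valid ...
 */
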
#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */