/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Interfaces handler.
 *
 * Version:     @(#)dev.h       1.0.10  08/12/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
                                        /* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV               /* feature macro: alloc_xxxdev
                                           functions are available. */
#define HAVE_FREE_NETDEV                /* free_netdev() */
#define HAVE_NETDEV_PRIV                /* netdev_priv() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS          0       /* keep 'em coming, baby */
#define NET_RX_DROP             1       /* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS        0x00
#define NET_XMIT_DROP           0x01    /* skb dropped */
#define NET_XMIT_CN             0x02    /* congestion notification */
#define NET_XMIT_POLICED        0x03    /* skb is shot by police */
#define NET_XMIT_MASK           0x0f    /* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
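
/*
 * Illustrative sketch, not an interface defined in this file: protocol code
 * that hands packets to dev_queue_xmit() commonly maps the qdisc return
 * codes above with net_xmit_eval(), so that a congestion notification
 * (NET_XMIT_CN) is not reported to its caller as a hard error:
 *
 *      err = dev_queue_xmit(skb);
 *      return net_xmit_eval(err);
 */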

/* Driver transmit return codes */
#define NETDEV_TX_MASK          0xf0

enum netdev_tx {
        __NETDEV_TX_MIN  = INT_MIN,     /* make sure enum is signed */
        NETDEV_TX_OK     = 0x00,        /* driver took care of packet */
        NETDEV_TX_BUSY   = 0x10,        /* driver tx path was busy */
        NETDEV_TX_LOCKED = 0x20,        /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
        /*
         * Positive cases with an skb consumed by a driver:
         * - successful transmission (rc == NETDEV_TX_OK)
         * - error while transmitting (rc < 0)
         * - error while queueing to a different device (rc & NET_XMIT_MASK)
         */
        if (likely(rc < NET_XMIT_MASK))
                return true;

        return false;
}
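
/*
 * Illustrative sketch (hypothetical caller code, not defined here): code
 * that invokes a driver's xmit routine directly can use dev_xmit_complete()
 * to tell whether it still owns the skb; requeue_skb() below is a made-up
 * helper standing in for whatever the caller does with an unconsumed skb:
 *
 *      rc = ops->ndo_start_xmit(skb, dev);
 *      if (!dev_xmit_complete(rc))
 *              requeue_skb(q, skb);    - skb not consumed, e.g. NETDEV_TX_BUSY
 */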

#endif

#define MAX_ADDR_LEN    32              /* Largest hardware address length */

#ifdef __KERNEL__
/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /* __KERNEL__ */

/*
 * Network device statistics. Akin to the 2.0 ether stats but
 * with byte counters.
 */

struct net_device_stats {
        unsigned long rx_packets;       /* total packets received */
        unsigned long tx_packets;       /* total packets transmitted */
        unsigned long rx_bytes;         /* total bytes received */
        unsigned long tx_bytes;         /* total bytes transmitted */
        unsigned long rx_errors;        /* bad packets received */
        unsigned long tx_errors;        /* packet transmit problems */
        unsigned long rx_dropped;       /* no space in linux buffers */
        unsigned long tx_dropped;       /* no space available in linux */
        unsigned long multicast;        /* multicast packets received */
        unsigned long collisions;

        /* detailed rx_errors: */
        unsigned long rx_length_errors;
        unsigned long rx_over_errors;   /* receiver ring buff overflow */
        unsigned long rx_crc_errors;    /* recved pkt with crc error */
        unsigned long rx_frame_errors;  /* recv'd frame alignment error */
        unsigned long rx_fifo_errors;   /* recv'r fifo overrun */
        unsigned long rx_missed_errors; /* receiver missed packet */

        /* detailed tx_errors */
        unsigned long tx_aborted_errors;
        unsigned long tx_carrier_errors;
        unsigned long tx_fifo_errors;
        unsigned long tx_heartbeat_errors;
        unsigned long tx_window_errors;

        /* for cslip etc */
        unsigned long rx_compressed;
        unsigned long tx_compressed;
};


/* Media selection options. */
enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats {
        unsigned total;
        unsigned dropped;
        unsigned time_squeeze;
        unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list {
        struct dev_addr_list *next;
        u8 da_addr[MAX_ADDR_LEN];
        u8 da_addrlen;
        u8 da_synced;
        int da_users;
        int da_gusers;
};

/*
 * We tag multicasts with these structures.
 */

#define dev_mc_list     dev_addr_list
#define dmi_addr        da_addr
#define dmi_addrlen     da_addrlen
#define dmi_users       da_users
#define dmi_gusers      da_gusers

struct netdev_hw_addr {
        struct list_head list;
        unsigned char addr[MAX_ADDR_LEN];
        unsigned char type;
#define NETDEV_HW_ADDR_T_LAN            1
#define NETDEV_HW_ADDR_T_SAN            2
#define NETDEV_HW_ADDR_T_SLAVE          3
#define NETDEV_HW_ADDR_T_UNICAST        4
        int refcount;
        bool synced;
        struct rcu_head rcu_head;
};

struct netdev_hw_addr_list {
        struct list_head list;
        int count;
};

#define netdev_uc_count(dev) ((dev)->uc.count)
#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
#define netdev_for_each_uc_addr(ha, dev) \
        list_for_each_entry(ha, &dev->uc.list, list)

#define netdev_mc_count(dev) ((dev)->mc_count)
#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)

#define netdev_for_each_mc_addr(mclist, dev) \
        for (mclist = dev->mc_list; mclist; mclist = mclist->next)
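
/*
 * Illustrative sketch (hypothetical driver code): an ndo_set_multicast_list()
 * implementation typically walks the multicast list with the accessor macros
 * above to program its hardware filter; foo_hw_add_filter() and priv are
 * made-up names:
 *
 *      struct dev_addr_list *mclist;
 *
 *      netdev_for_each_mc_addr(mclist, dev)
 *              foo_hw_add_filter(priv, mclist->dmi_addr, mclist->dmi_addrlen);
 */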

struct hh_cache {
        struct hh_cache *hh_next;       /* Next entry */
        atomic_t hh_refcnt;             /* number of users */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
        __be16 hh_type ____cacheline_aligned_in_smp;
                                        /* protocol identifier, f.e. ETH_P_IP
                                         * NOTE: For VLANs, this will be the
                                         * encapsulated type. --BLG
                                         */
        u16 hh_len;                     /* length of header */
        int (*hh_output)(struct sk_buff *skb);
        seqlock_t hh_lock;

        /* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
        unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
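
/*
 * Illustrative sketch, assuming a caller that builds an outgoing frame by
 * hand: the skb is usually sized with LL_ALLOCATED_SPACE() and the headroom
 * reserved with LL_RESERVED_SPACE() so the link layer header fits in front
 * of the payload:
 *
 *      skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *      if (skb)
 *              skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */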

struct header_ops {
        int  (*create)(struct sk_buff *skb, struct net_device *dev,
                       unsigned short type, const void *daddr,
                       const void *saddr, unsigned len);
        int  (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int  (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
        int  (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
        void (*cache_update)(struct hh_cache *hh,
                             const struct net_device *dev,
                             const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit. This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head poll_list;

        unsigned long state;
        int weight;
        int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t poll_lock;
        int poll_owner;
#endif

        unsigned int gro_count;

        struct net_device *dev;
        struct list_head dev_list;
        struct sk_buff *gro_list;
        struct sk_buff *skb;
};

enum {
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
        NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
        GRO_MERGED,
        GRO_MERGED_FREE,
        GRO_HELD,
        GRO_NORMAL,
        GRO_DROP,
};
typedef enum gro_result gro_result_t;

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
        return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 * napi_schedule - schedule NAPI poll
 * @n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return 1;
        }
        return 0;
}

/**
 * napi_complete - NAPI processing complete
 * @n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
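
/*
 * Illustrative sketch (hypothetical driver code; the foo_* names are made
 * up): the usual NAPI pattern is to call napi_schedule() from the device
 * interrupt handler and napi_complete() from the poll routine once less
 * than the full budget was consumed:
 *
 *      static irqreturn_t foo_interrupt(int irq, void *data)
 *      {
 *              struct foo_priv *priv = data;
 *
 *              foo_disable_rx_irq(priv);
 *              napi_schedule(&priv->napi);
 *              return IRQ_HANDLED;
 *      }
 *
 *      static int foo_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *              int done = foo_clean_rx_ring(priv, budget);
 *
 *              if (done < budget) {
 *                      napi_complete(napi);
 *                      foo_enable_rx_irq(priv);
 *              }
 *              return done;
 *      }
 */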

/**
 * napi_disable - prevent NAPI from scheduling
 * @n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
        set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
        clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 * napi_enable - enable NAPI scheduling
 * @n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 * napi_synchronize - wait until NAPI is not running
 * @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif

enum netdev_queue_state_t {
        __QUEUE_STATE_XOFF,
        __QUEUE_STATE_FROZEN,
};

struct netdev_queue {
/*
 * read mostly part
 */
        struct net_device *dev;
        struct Qdisc *qdisc;
        unsigned long state;
        struct Qdisc *qdisc_sleeping;
/*
 * write mostly part
 */
        spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
        int xmit_lock_owner;
        /*
         * please use this field instead of dev->trans_start
         */
        unsigned long trans_start;
        unsigned long tx_bytes;
        unsigned long tx_packets;
        unsigned long tx_dropped;
} ____cacheline_aligned_in_smp;


/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 * This function is called once when a network device is registered.
 * The network device can use this for any late stage initialization
 * or semantic validation. It can fail with an error code which will
 * be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 * This function is called when a device is unregistered or when
 * registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 * This function is called when a network device transitions to the up
 * state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 * This function is called when a network device transitions to the down
 * state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 * Called when a packet needs to be transmitted.
 * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 * Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 * Called to decide which queue to use when the device supports multiple
 * transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 * This function is called to allow the device receiver to make
 * changes to configuration when multicast or promiscuous mode is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 * This function is called when the device changes its address list filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 * This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 * This function is called when the Media Access Control address
 * needs to be changed. If this interface is not defined, the
 * MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 * Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 * Called when a user requests an ioctl which can't be handled by
 * the generic interface code. If not defined, ioctls return
 * a not supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 * Used to set network device bus interface parameters. This interface
 * is retained for legacy reasons; new devices should use the bus
 * interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 * Called when a user wants to change the Maximum Transfer Unit
 * of a device. If not defined, any request to change the MTU will
 * return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 * Callback used when the transmitter has not made any progress
 * for dev->watchdog ticks.
 *
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 * Called when a user wants to get the network device usage
 * statistics. If not defined, the counters in dev->stats will
 * be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 * If the device supports VLAN receive acceleration
 * (i.e. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 * when the vlan group for the device changes. Note: grp is NULL
 * if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 * this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 * this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
        int             (*ndo_init)(struct net_device *dev);
        void            (*ndo_uninit)(struct net_device *dev);
        int             (*ndo_open)(struct net_device *dev);
        int             (*ndo_stop)(struct net_device *dev);
        netdev_tx_t     (*ndo_start_xmit)(struct sk_buff *skb,
                                          struct net_device *dev);
        u16             (*ndo_select_queue)(struct net_device *dev,
                                            struct sk_buff *skb);
        void            (*ndo_change_rx_flags)(struct net_device *dev,
                                               int flags);
        void            (*ndo_set_rx_mode)(struct net_device *dev);
        void            (*ndo_set_multicast_list)(struct net_device *dev);
        int             (*ndo_set_mac_address)(struct net_device *dev,
                                               void *addr);
        int             (*ndo_validate_addr)(struct net_device *dev);
        int             (*ndo_do_ioctl)(struct net_device *dev,
                                        struct ifreq *ifr, int cmd);
        int             (*ndo_set_config)(struct net_device *dev,
                                          struct ifmap *map);
        int             (*ndo_change_mtu)(struct net_device *dev,
                                          int new_mtu);
        int             (*ndo_neigh_setup)(struct net_device *dev,
                                           struct neigh_parms *);
        void            (*ndo_tx_timeout)(struct net_device *dev);

        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

        void            (*ndo_vlan_rx_register)(struct net_device *dev,
                                                struct vlan_group *grp);
        void            (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                               unsigned short vid);
        void            (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
        void            (*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        int             (*ndo_fcoe_enable)(struct net_device *dev);
        int             (*ndo_fcoe_disable)(struct net_device *dev);
        int             (*ndo_fcoe_ddp_setup)(struct net_device *dev,
                                               u16 xid,
                                               struct scatterlist *sgl,
                                               unsigned int sgc);
        int             (*ndo_fcoe_ddp_done)(struct net_device *dev,
                                              u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
        int             (*ndo_fcoe_get_wwn)(struct net_device *dev,
                                            u64 *wwn, int type);
#endif
};
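
/*
 * Illustrative sketch (hypothetical driver code; the foo_* names are made
 * up, eth_mac_addr()/eth_validate_addr() are the usual ethernet helpers):
 * a driver normally provides one static ops table and points
 * dev->netdev_ops at it before register_netdev():
 *
 *      static const struct net_device_ops foo_netdev_ops = {
 *              .ndo_open               = foo_open,
 *              .ndo_stop               = foo_stop,
 *              .ndo_start_xmit         = foo_start_xmit,
 *              .ndo_set_mac_address    = eth_mac_addr,
 *              .ndo_validate_addr      = eth_validate_addr,
 *      };
 */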

/*
 * The DEVICE structure.
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */

struct net_device {

        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
         * of the interface.
         */
        char name[IFNAMSIZ];
        /* device name hash chain */
        struct hlist_node name_hlist;
        /* snmp alias */
        char *ifalias;

        /*
         * I/O specific fields
         * FIXME: Merge these and struct ifmap into one
         */
        unsigned long mem_end;          /* shared mem end */
        unsigned long mem_start;        /* shared mem start */
        unsigned long base_addr;        /* device I/O address */
        unsigned int irq;               /* device IRQ number */

        /*
         * Some hardware also needs these fields, but they are not
         * part of the usual set specified in Space.c.
         */

        unsigned char if_port;          /* Selectable AUI, TP,.. */
        unsigned char dma;              /* DMA channel */

        unsigned long state;

        struct list_head dev_list;
        struct list_head napi_list;
        struct list_head unreg_list;

        /* Net device features */
        unsigned long features;
#define NETIF_F_SG              1       /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM         2       /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM         4       /* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM         8       /* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM       16      /* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA         32      /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST        64      /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX      128     /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX      256     /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
#define NETIF_F_GSO             2048    /* Enable software GSO. */
#define NETIF_F_LLTX            4096    /* LockLess TX - deprecated. Please */
                                        /* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL     8192    /* Does not change network namespaces */
#define NETIF_F_GRO             16384   /* Generic receive offload */
#define NETIF_F_LRO             32768   /* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC        (1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM       (1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU        (1 << 26) /* Supports max FCoE MTU, 2158 bytes */

        /* Segmentation offload features */
#define NETIF_F_GSO_SHIFT       16
#define NETIF_F_GSO_MASK        0x00ff0000
#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO             (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

        /* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM        (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
         */
#define NETIF_F_ONE_FOR_ALL     (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
                                 NETIF_F_SG | NETIF_F_HIGHDMA | \
                                 NETIF_F_FRAGLIST)

        /* Interface index. Unique device identifier */
        int ifindex;
        int iflink;

        struct net_device_stats stats;

#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data *wireless_data;
#endif
        /* Management operations */
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;

        /* Hardware header description */
        const struct header_ops *header_ops;

        unsigned int flags;             /* interface flags (a la BSD) */
        unsigned short gflags;
        unsigned short priv_flags;      /* Like 'flags' but invisible to userspace. */
        unsigned short padded;          /* How much padding added by alloc_netdev() */

        unsigned char operstate;        /* RFC2863 operstate */
        unsigned char link_mode;        /* mapping policy to operstate */

        unsigned mtu;                   /* interface MTU value */
        unsigned short type;            /* interface hardware type */
        unsigned short hard_header_len; /* hardware hdr length */

        /* extra head- and tailroom the hardware may need, but not in all cases
         * can this be guaranteed, especially tailroom. Some cases also use
         * LL_MAX_HEADER instead to allocate the skb.
         */
        unsigned short needed_headroom;
        unsigned short needed_tailroom;

        struct net_device *master;      /* Pointer to master device of a group,
                                         * which this device is member of.
                                         */

        /* Interface address info. */
        unsigned char perm_addr[MAX_ADDR_LEN];  /* permanent hw address */
        unsigned char addr_len;         /* hardware address length */
        unsigned short dev_id;          /* for shared network cards */

        struct netdev_hw_addr_list uc;  /* Secondary unicast
                                           mac addresses */
        int uc_promisc;
        spinlock_t addr_list_lock;
        struct dev_addr_list *mc_list;  /* Multicast mac addresses */
        int mc_count;                   /* Number of installed mcasts */
        unsigned int promiscuity;
        unsigned int allmulti;


        /* Protocol specific pointers */

#ifdef CONFIG_NET_DSA
        void *dsa_ptr;                  /* dsa specific data */
#endif
        void *atalk_ptr;                /* AppleTalk link */
        void *ip_ptr;                   /* IPv4 specific data */
        void *dn_ptr;                   /* DECnet specific data */
        void *ip6_ptr;                  /* IPv6 specific data */
        void *ec_ptr;                   /* Econet specific data */
        void *ax25_ptr;                 /* AX.25 specific data */
        struct wireless_dev *ieee80211_ptr;     /* IEEE 802.11 specific data,
                                                   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
        unsigned long last_rx;          /* Time of last Rx */
        /* Interface address info used in eth_type_trans() */
        unsigned char *dev_addr;        /* hw address, (before bcast
                                           because most packets are
                                           unicast) */

        struct netdev_hw_addr_list dev_addrs;   /* list of device
                                                   hw addresses */

        unsigned char broadcast[MAX_ADDR_LEN];  /* hw bcast addr */

        struct netdev_queue rx_queue;

        struct netdev_queue *_tx ____cacheline_aligned_in_smp;

        /* Number of TX queues allocated at alloc_netdev_mq() time */
        unsigned int num_tx_queues;

        /* Number of TX queues currently active in device */
        unsigned int real_num_tx_queues;

        /* root qdisc from userspace point of view */
        struct Qdisc *qdisc;

        unsigned long tx_queue_len;     /* Max frames per queue allowed */
        spinlock_t tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
        /* These may be needed for future network-power-down code. */

        /*
         * trans_start here is expensive for high speed devices on SMP,
         * please use netdev_queue->trans_start instead.
         */
        unsigned long trans_start;      /* Time (in jiffies) of last Tx */

        int watchdog_timeo;             /* used by dev_watchdog() */
        struct timer_list watchdog_timer;

        /* Number of references to this device */
        atomic_t refcnt ____cacheline_aligned_in_smp;

        /* delayed register/unregister */
        struct list_head todo_list;
        /* device index hash chain */
        struct hlist_node index_hlist;

        struct list_head link_watch_list;

        /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED = 0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
               NETREG_UNREGISTERED,     /* completed unregister todo */
               NETREG_RELEASED,         /* called free_netdev */
               NETREG_DUMMY,            /* dummy device for NAPI poll */
        } reg_state;

        /* Called from unregister, can be used to call free_netdev */
        void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
        struct netpoll_info *npinfo;
#endif

#ifdef CONFIG_NET_NS
        /* Network namespace this network device is inside */
        struct net *nd_net;
#endif

        /* mid-layer private */
        void *ml_priv;

        /* bridge stuff */
        struct net_bridge_port *br_port;
        /* macvlan */
        struct macvlan_port *macvlan_port;
        /* GARP */
        struct garp_port *garp_port;

        /* class/net/name entry */
        struct device dev;
        /* space for optional device, statistics, and wireless sysfs groups */
        const struct attribute_group *sysfs_groups[4];

        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;

        /* VLAN feature mask */
        unsigned long vlan_features;

        /* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE            65536
        unsigned int gso_max_size;

#ifdef CONFIG_DCB
        /* Data Center Bridging netlink ops */
        const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        /* max exchange id for FCoE LRO by ddp */
        unsigned int fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN            32

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
{
        return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
                                                      void *),
                                            void *arg)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
        return dev->nd_net;
#else
        return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
        release_net(dev->nd_net);
        dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
        if (dev->dsa_ptr != NULL)
                return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

        return 0;
}

#ifndef CONFIG_NET_NS
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
        if (dev->dsa_ptr != NULL)
                return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

        return 0;
}

/**
 * netdev_priv - access network device private data
 * @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
        return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
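
/*
 * Illustrative sketch (hypothetical driver code; foo_priv is a made-up
 * type): private state is co-allocated with the device (e.g. by
 * alloc_etherdev() from <linux/etherdevice.h>) and is then reached
 * through netdev_priv():
 *
 *      struct foo_priv { struct napi_struct napi; };
 *
 *      dev = alloc_etherdev(sizeof(struct foo_priv));
 *      priv = netdev_priv(dev);
 */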

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)        ((net)->dev.type = (devtype))

/**
 * netif_napi_add - initialize a napi context
 * @dev: network device
 * @napi: napi context
 * @poll: polling function
 * @weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                    int (*poll)(struct napi_struct *, int), int weight);

/**
 * netif_napi_del - remove a napi context
 * @napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);

struct napi_gro_cb {
        /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
        void *frag0;

        /* Length of frag0. */
        unsigned int frag0_len;

        /* This indicates where we are processing relative to skb->data. */
        int data_offset;

        /* This is non-zero if the packet may be of the same flow. */
        int same_flow;

        /* This is non-zero if the packet cannot be merged with the new skb. */
        int flush;

        /* Number of segments aggregated. */
        int count;

        /* Free the skb? */
        int free;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

struct packet_type {
        __be16 type;                    /* This is really htons(ether_type). */
        struct net_device *dev;         /* NULL is wildcarded here */
        int (*func)(struct sk_buff *,
                    struct net_device *,
                    struct packet_type *,
                    struct net_device *);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
                                       int features);
        int (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int (*gro_complete)(struct sk_buff *skb);
        void *af_packet_priv;
        struct list_head list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;          /* Device list lock */


#define for_each_netdev(net, d)         \
        list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
        list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)     \
        list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
        list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
        list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d) \
        list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)
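
/*
 * Illustrative sketch, not an interface defined here: walking the
 * per-namespace device list requires RTNL or dev_base_lock, or RCU for
 * the _rcu variants, for example:
 *
 *      struct net_device *dev;
 *
 *      rcu_read_lock();
 *      for_each_netdev_rcu(net, dev)
 *              pr_debug("%s\n", dev->name);
 *      rcu_read_unlock();
 */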
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001137static inline struct net_device *next_net_device(struct net_device *dev)
1138{
1139 struct list_head *lh;
1140 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07001141
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001142 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001143 lh = dev->dev_list.next;
1144 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1145}
1146
Eric Dumazetce81b762009-11-11 17:34:30 +00001147static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1148{
1149 struct list_head *lh;
1150 struct net *net;
1151
1152 net = dev_net(dev);
1153 lh = rcu_dereference(dev->dev_list.next);
1154 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1155}
1156
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001157static inline struct net_device *first_net_device(struct net *net)
1158{
1159 return list_empty(&net->dev_base_head) ? NULL :
1160 net_device_entry(net->dev_base_head.next);
1161}
Pavel Emelianov7562f872007-05-03 15:13:45 -07001162
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163extern int netdev_boot_setup_check(struct net_device *dev);
1164extern unsigned long netdev_boot_base(const char *prefix, int unit);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001165extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
1166extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1167extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168extern void dev_add_pack(struct packet_type *pt);
1169extern void dev_remove_pack(struct packet_type *pt);
1170extern void __dev_remove_pack(struct packet_type *pt);
1171
Eric W. Biederman881d9662007-09-17 11:56:21 -07001172extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 unsigned short mask);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001174extern struct net_device *dev_get_by_name(struct net *net, const char *name);
Eric Dumazet72c95282009-10-30 07:11:27 +00001175extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001176extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177extern int dev_alloc_name(struct net_device *dev, const char *name);
1178extern int dev_open(struct net_device *dev);
1179extern int dev_close(struct net_device *dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001180extern void dev_disable_lro(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181extern int dev_queue_xmit(struct sk_buff *skb);
1182extern int register_netdevice(struct net_device *dev);
Eric Dumazet44a08732009-10-27 07:03:04 +00001183extern void unregister_netdevice_queue(struct net_device *dev,
1184 struct list_head *head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00001185extern void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00001186static inline void unregister_netdevice(struct net_device *dev)
1187{
1188 unregister_netdevice_queue(dev, NULL);
1189}
1190
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191extern void free_netdev(struct net_device *dev);
1192extern void synchronize_net(void);
1193extern int register_netdevice_notifier(struct notifier_block *nb);
1194extern int unregister_netdevice_notifier(struct notifier_block *nb);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001195extern int init_dummy_netdev(struct net_device *dev);
David S. Miller9d40bbd2009-03-04 23:46:25 -08001196extern void netdev_resync_ops(struct net_device *dev);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001197
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001198extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001199extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1200extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00001201extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202extern int dev_restart(struct net_device *dev);
1203#ifdef CONFIG_NETPOLL_TRAP
1204extern int netpoll_trap(void);
1205#endif
Herbert Xu86911732009-01-29 14:19:50 +00001206extern int skb_gro_receive(struct sk_buff **head,
1207 struct sk_buff *skb);
Herbert Xu78a478d2009-05-26 18:50:21 +00001208extern void skb_gro_reset_offset(struct sk_buff *skb);
Herbert Xu86911732009-01-29 14:19:50 +00001209
1210static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1211{
1212 return NAPI_GRO_CB(skb)->data_offset;
1213}
1214
1215static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1216{
1217 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1218}
1219
1220static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1221{
1222 NAPI_GRO_CB(skb)->data_offset += len;
1223}
1224
Herbert Xua5b1cf22009-05-26 18:50:28 +00001225static inline void *skb_gro_header_fast(struct sk_buff *skb,
1226 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00001227{
Herbert Xu78a478d2009-05-26 18:50:21 +00001228 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00001229}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
Herbert Xua5b1cf22009-05-26 18:50:28 +00001231static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1232{
1233 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1234}
1235
1236static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1237 unsigned int offset)
1238{
1239 NAPI_GRO_CB(skb)->frag0 = NULL;
1240 NAPI_GRO_CB(skb)->frag0_len = 0;
1241 return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
1242}
1243
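/*
 * Typical use of the three header accessors above, as seen in a
 * protocol's gro_receive handler (sketch; 'th' stands for whatever
 * header the caller is after and is only illustrative):
 *
 *	off = skb_gro_offset(skb);
 *	hlen = off + sizeof(*th);
 *	th = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		th = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!th))
 *			goto out;
 *	}
 *
 * i.e. try the frag0 fast path first and fall back to pulling the
 * header into the linear area only when it is not fully there.
 */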
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001244static inline void *skb_gro_mac_header(struct sk_buff *skb)
1245{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001246 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001247}
1248
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001249static inline void *skb_gro_network_header(struct sk_buff *skb)
1250{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001251 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1252 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001253}
1254
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001255static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1256 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001257 const void *daddr, const void *saddr,
1258 unsigned len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001259{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02001260 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001261 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001262
1263 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001264}
1265
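/*
 * Example (sketch): building a link-layer header before transmission.
 * ETH_P_IP and dest_hw are illustrative; callers pass the protocol and
 * destination appropriate to the packet being built.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw,
 *			    dev->dev_addr, skb->len) < 0)
 *		goto drop;
 *
 * A return value of 0 simply means the device has no header_ops.
 */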
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001266static inline int dev_parse_header(const struct sk_buff *skb,
1267 unsigned char *haddr)
1268{
1269 const struct net_device *dev = skb->dev;
1270
Patrick McHardy1b833362007-10-18 05:09:28 -07001271 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001272 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001273 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001274}
1275
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1277extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1278static inline int unregister_gifconf(unsigned int family)
1279{
1280 return register_gifconf(family, NULL);
1281}
1282
1283/*
1284 * Incoming packets are placed on per-cpu queues so that
1285 * no locking is needed.
1286 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001287struct softnet_data {
David S. Miller37437bb2008-07-16 02:15:04 -07001288 struct Qdisc *output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 struct sk_buff_head input_pkt_queue;
1290 struct list_head poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 struct sk_buff *completion_queue;
1292
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001293 struct napi_struct backlog;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294};
1295
1296DECLARE_PER_CPU(struct softnet_data,softnet_data);
1297
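/*
 * Sketch of how the per-cpu queue is used (roughly what netif_rx()
 * does internally; shown for illustration only, with backlog-limit and
 * error handling omitted):
 *
 *	struct softnet_data *queue;
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	queue = &__get_cpu_var(softnet_data);
 *	__skb_queue_tail(&queue->input_pkt_queue, skb);
 *	napi_schedule(&queue->backlog);
 *	local_irq_restore(flags);
 *
 * Because each CPU owns its own softnet_data, only local interrupts
 * need to be disabled; no spinlock is taken.
 */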
1298#define HAVE_NETIF_QUEUE
1299
David S. Miller37437bb2008-07-16 02:15:04 -07001300extern void __netif_schedule(struct Qdisc *q);
David S. Miller86d804e2008-07-08 23:11:25 -07001301
1302static inline void netif_schedule_queue(struct netdev_queue *txq)
1303{
David S. Miller79d16382008-07-08 23:14:46 -07001304 if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
David S. Miller37437bb2008-07-16 02:15:04 -07001305 __netif_schedule(txq->qdisc);
David S. Miller86d804e2008-07-08 23:11:25 -07001306}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001308static inline void netif_tx_schedule_all(struct net_device *dev)
1309{
1310 unsigned int i;
1311
1312 for (i = 0; i < dev->num_tx_queues; i++)
1313 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1314}
1315
Dave Jonesd29f7492008-07-22 14:09:06 -07001316static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1317{
1318 clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1319}
1320
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001321/**
1322 * netif_start_queue - allow transmit
1323 * @dev: network device
1324 *
1325 * Allow upper layers to call the device hard_start_xmit routine.
1326 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327static inline void netif_start_queue(struct net_device *dev)
1328{
David S. Millere8a04642008-07-17 00:34:19 -07001329 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330}
1331
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001332static inline void netif_tx_start_all_queues(struct net_device *dev)
1333{
1334 unsigned int i;
1335
1336 for (i = 0; i < dev->num_tx_queues; i++) {
1337 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1338 netif_tx_start_queue(txq);
1339 }
1340}
1341
David S. Miller79d16382008-07-08 23:14:46 -07001342static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343{
1344#ifdef CONFIG_NETPOLL_TRAP
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07001345 if (netpoll_trap()) {
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001346 netif_tx_start_queue(dev_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 return;
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07001348 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349#endif
David S. Miller79d16382008-07-08 23:14:46 -07001350 if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
David S. Miller37437bb2008-07-16 02:15:04 -07001351 __netif_schedule(dev_queue->qdisc);
David S. Miller79d16382008-07-08 23:14:46 -07001352}
1353
Dave Jonesd29f7492008-07-22 14:09:06 -07001354/**
1355 * netif_wake_queue - restart transmit
1356 * @dev: network device
1357 *
1358 * Allow upper layers to call the device hard_start_xmit routine.
1359 * Used for flow control when transmit resources are available.
1360 */
David S. Miller79d16382008-07-08 23:14:46 -07001361static inline void netif_wake_queue(struct net_device *dev)
1362{
David S. Millere8a04642008-07-17 00:34:19 -07001363 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364}
1365
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001366static inline void netif_tx_wake_all_queues(struct net_device *dev)
1367{
1368 unsigned int i;
1369
1370 for (i = 0; i < dev->num_tx_queues; i++) {
1371 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1372 netif_tx_wake_queue(txq);
1373 }
1374}
1375
Dave Jonesd29f7492008-07-22 14:09:06 -07001376static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1377{
1378 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1379}
1380
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001381/**
1382 * netif_stop_queue - stop the transmit queue
1383 * @dev: network device
1384 *
1385 * Stop upper layers from calling the device hard_start_xmit routine.
1386 * Used for flow control when transmit resources are unavailable.
1387 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388static inline void netif_stop_queue(struct net_device *dev)
1389{
David S. Millere8a04642008-07-17 00:34:19 -07001390 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391}
1392
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001393static inline void netif_tx_stop_all_queues(struct net_device *dev)
1394{
1395 unsigned int i;
1396
1397 for (i = 0; i < dev->num_tx_queues; i++) {
1398 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1399 netif_tx_stop_queue(txq);
1400 }
1401}
1402
Dave Jonesd29f7492008-07-22 14:09:06 -07001403static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
1404{
1405 return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1406}
1407
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001408/**
1409 * netif_queue_stopped - test if the transmit queue is flow-blocked
1410 * @dev: network device
1411 *
1412 * Test if transmit queue on device is currently unable to send.
1413 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414static inline int netif_queue_stopped(const struct net_device *dev)
1415{
David S. Millere8a04642008-07-17 00:34:19 -07001416 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417}
1418
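/*
 * Putting the queue start/stop/wake helpers together: a typical
 * single-queue driver pattern (sketch only; priv, ring_has_room() and
 * the mydrv_* names are hypothetical):
 *
 *	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		... post skb to the hardware ring ...
 *		if (!ring_has_room(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and from the TX-completion interrupt, once descriptors have been
 * reclaimed:
 *
 *	if (netif_queue_stopped(dev) && ring_has_room(priv))
 *		netif_wake_queue(dev);
 */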
David S. Millerc3f26a22008-07-31 16:58:50 -07001419static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
1420{
1421 return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
1422}
1423
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001424/**
1425 * netif_running - test if up
1426 * @dev: network device
1427 *
1428 * Test if the device has been brought up.
1429 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430static inline int netif_running(const struct net_device *dev)
1431{
1432 return test_bit(__LINK_STATE_START, &dev->state);
1433}
1434
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001435/*
1436 * Routines to manage the subqueues on a device.  We only need start,
1437 * stop, and a check whether a subqueue is stopped.  All other device
1438 * management is done at the overall netdevice level.
1439 * There is also a test for whether the device is multiqueue.
1440 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001441
1442/**
1443 * netif_start_subqueue - allow sending packets on subqueue
1444 * @dev: network device
1445 * @queue_index: sub queue index
1446 *
1447 * Start individual transmit queue of a device with multiple transmit queues.
1448 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001449static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1450{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001451 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001452
1453 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001454}
1455
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001456/**
1457 * netif_stop_subqueue - stop sending packets on subqueue
1458 * @dev: network device
1459 * @queue_index: sub queue index
1460 *
1461 * Stop individual transmit queue of a device with multiple transmit queues.
1462 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001463static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
1464{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001465 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001466#ifdef CONFIG_NETPOLL_TRAP
1467 if (netpoll_trap())
1468 return;
1469#endif
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001470 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001471}
1472
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001473/**
1474 * netif_subqueue_stopped - test status of subqueue
1475 * @dev: network device
1476 * @queue_index: sub queue index
1477 *
1478 * Check individual transmit queue of a device with multiple transmit queues.
1479 */
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001480static inline int __netif_subqueue_stopped(const struct net_device *dev,
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001481 u16 queue_index)
1482{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001483 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001484
1485 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001486}
1487
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001488static inline int netif_subqueue_stopped(const struct net_device *dev,
1489 struct sk_buff *skb)
1490{
1491 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
1492}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001493
1494/**
1495 * netif_wake_subqueue - allow sending packets on subqueue
1496 * @dev: network device
1497 * @queue_index: sub queue index
1498 *
1499 * Resume individual transmit queue of a device with multiple transmit queues.
1500 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001501static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1502{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001503 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001504#ifdef CONFIG_NETPOLL_TRAP
1505 if (netpoll_trap())
1506 return;
1507#endif
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001508 if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
David S. Miller37437bb2008-07-16 02:15:04 -07001509 __netif_schedule(txq->qdisc);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001510}
1511
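/*
 * Per-subqueue flow control for a multiqueue driver (sketch; priv and
 * ring_full() are hypothetical).  The subqueue an skb was mapped to is
 * stopped and later woken individually:
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (ring_full(priv, q))
 *		netif_stop_subqueue(dev, q);
 *	...
 *	netif_wake_subqueue(dev, q);	(from the TX-completion path)
 */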
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001512/**
1513 * netif_is_multiqueue - test if device has multiple transmit queues
1514 * @dev: network device
1515 *
1516 * Check if the device has multiple transmit queues.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001517 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001518static inline int netif_is_multiqueue(const struct net_device *dev)
1519{
David S. Miller09e83b52008-07-17 01:52:12 -07001520 return (dev->num_tx_queues > 1);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001521}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
1523/* Use this variant when it is known for sure that it
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07001524 * is executing from hardware interrupt context or with hardware interrupts
1525 * disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001527extern void dev_kfree_skb_irq(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
1529/* Use this variant in places where it could be invoked
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07001530 * from either hardware interrupt or other context, with hardware interrupts
1531 * either disabled or enabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001533extern void dev_kfree_skb_any(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
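/*
 * Example (sketch): a TX-completion hard-IRQ handler frees transmitted
 * buffers with
 *
 *	dev_kfree_skb_irq(skb);
 *
 * while a helper that may run in either hard-IRQ or other context
 * (a hypothetical clean_tx_ring(), say) would use
 *
 *	dev_kfree_skb_any(skb);
 */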
1535#define HAVE_NETIF_RX 1
1536extern int netif_rx(struct sk_buff *skb);
1537extern int netif_rx_ni(struct sk_buff *skb);
1538#define HAVE_NETIF_RECEIVE_SKB 1
1539extern int netif_receive_skb(struct sk_buff *skb);
Ben Hutchings5b252f02009-10-29 07:17:09 +00001540extern gro_result_t dev_gro_receive(struct napi_struct *napi,
Herbert Xu96e93ea2009-01-06 10:49:34 -08001541 struct sk_buff *skb);
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07001542extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
1543extern gro_result_t napi_gro_receive(struct napi_struct *napi,
Herbert Xud565b0a2008-12-15 23:38:52 -08001544 struct sk_buff *skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08001545extern void napi_reuse_skb(struct napi_struct *napi,
1546 struct sk_buff *skb);
Herbert Xu76620aa2009-04-16 02:02:07 -07001547extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07001548extern gro_result_t napi_frags_finish(struct napi_struct *napi,
Ben Hutchings5b252f02009-10-29 07:17:09 +00001549 struct sk_buff *skb,
1550 gro_result_t ret);
Herbert Xu76620aa2009-04-16 02:02:07 -07001551extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07001552extern gro_result_t napi_gro_frags(struct napi_struct *napi);
Herbert Xu76620aa2009-04-16 02:02:07 -07001553
1554static inline void napi_free_frags(struct napi_struct *napi)
1555{
1556 kfree_skb(napi->skb);
1557 napi->skb = NULL;
1558}
1559
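/*
 * Example (sketch): a NAPI driver's poll() routine normally feeds
 * received frames to GRO rather than calling netif_receive_skb()
 * directly (adapter and mydrv_fetch_rx() are hypothetical):
 *
 *	while (work < budget &&
 *	       (skb = mydrv_fetch_rx(adapter)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, adapter->netdev);
 *		napi_gro_receive(&adapter->napi, skb);
 *		work++;
 *	}
 *	if (work < budget)
 *		napi_complete(&adapter->napi);
 *	return work;
 */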
Patrick McHardybc1d0412008-07-14 22:49:30 -07001560extern void netif_nit_deliver(struct sk_buff *skb);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08001561extern int dev_valid_name(const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001562extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
1563extern int dev_ethtool(struct net *net, struct ifreq *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564extern unsigned dev_get_flags(const struct net_device *);
1565extern int dev_change_flags(struct net_device *, unsigned);
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001566extern int dev_change_name(struct net_device *, const char *);
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001567extern int dev_set_alias(struct net_device *, const char *, size_t);
Eric W. Biedermance286d32007-09-12 13:53:49 +02001568extern int dev_change_net_namespace(struct net_device *,
1569 struct net *, const char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570extern int dev_set_mtu(struct net_device *, int);
1571extern int dev_set_mac_address(struct net_device *,
1572 struct sockaddr *);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001573extern int dev_hard_start_xmit(struct sk_buff *skb,
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001574 struct net_device *dev,
1575 struct netdev_queue *txq);
Arnd Bergmann44540962009-11-26 06:07:08 +00001576extern int dev_forward_skb(struct net_device *dev,
1577 struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001579extern int netdev_budget;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
1581/* Called by rtnetlink.c:rtnl_unlock() */
1582extern void netdev_run_todo(void);
1583
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001584/**
1585 * dev_put - release reference to device
1586 * @dev: network device
1587 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07001588 * Release reference to device to allow it to be freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001589 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590static inline void dev_put(struct net_device *dev)
1591{
1592 atomic_dec(&dev->refcnt);
1593}
1594
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001595/**
1596 * dev_hold - get reference to device
1597 * @dev: network device
1598 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07001599 * Hold reference to device to keep it from being freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001600 */
Stephen Hemminger15333062006-03-20 22:32:28 -08001601static inline void dev_hold(struct net_device *dev)
1602{
1603 atomic_inc(&dev->refcnt);
1604}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
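/*
 * Example (sketch): code that looks a device up under RCU but keeps
 * using it after rcu_read_unlock() must take its own reference:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		dev_hold(dev);
 *	rcu_read_unlock();
 *	...
 *	if (dev)
 *		dev_put(dev);
 *
 * dev_get_by_index() and friends already do the dev_hold() internally,
 * so their result must likewise be balanced with dev_put().
 */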
1606/* Carrier loss detection, dial on demand. The functions netif_carrier_on
1607 * and _off may be called from IRQ context, but it is the caller
1608 * who is responsible for serialization of these calls.
Stefan Rompfb00055a2006-03-20 17:09:11 -08001609 *
1610 * The name "carrier" is a misnomer: these functions should really be
1611 * called netif_lowerlayer_*(), because they represent the state of any
1612 * kind of lower layer, not just hardware media.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 */
1614
1615extern void linkwatch_fire_event(struct net_device *dev);
Eric Dumazete014deb2009-11-17 05:59:21 +00001616extern void linkwatch_forget_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001618/**
1619 * netif_carrier_ok - test if carrier present
1620 * @dev: network device
1621 *
1622 * Check if carrier is present on device
1623 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624static inline int netif_carrier_ok(const struct net_device *dev)
1625{
1626 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
1627}
1628
Eric Dumazet9d214932009-05-17 20:55:16 -07001629extern unsigned long dev_trans_start(struct net_device *dev);
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631extern void __netdev_watchdog_up(struct net_device *dev);
1632
Denis Vlasenko0a242ef2005-08-11 15:32:53 -07001633extern void netif_carrier_on(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
Denis Vlasenko0a242ef2005-08-11 15:32:53 -07001635extern void netif_carrier_off(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
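/*
 * Example (sketch): a driver's link-change interrupt or PHY poll
 * routine typically reports the lower-layer state like this
 * (link_is_up() and priv are hypothetical):
 *
 *	if (link_is_up(priv))
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 *
 * The linkwatch machinery then takes care of notifying user space and
 * restarting the transmit watchdog as appropriate.
 */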
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001637/**
1638 * netif_dormant_on - mark device as dormant.
1639 * @dev: network device
1640 *
1641 * Mark device as dormant (as per RFC2863).
1642 *
1643 * The dormant state indicates that the relevant interface is not
1644 * actually in a condition to pass packets (i.e., it is not 'up') but is
1645 * in a "pending" state, waiting for some external event. For "on-
1646 * demand" interfaces, this new state identifies the situation where the
1647 * interface is waiting for events to place it in the up state.
1648 *
1649 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08001650static inline void netif_dormant_on(struct net_device *dev)
1651{
1652 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
1653 linkwatch_fire_event(dev);
1654}
1655
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001656/**
1657 * netif_dormant_off - set device as not dormant.
1658 * @dev: network device
1659 *
1660 * Device is not in dormant state.
1661 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08001662static inline void netif_dormant_off(struct net_device *dev)
1663{
1664 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
1665 linkwatch_fire_event(dev);
1666}
1667
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001668/**
1669 * netif_dormant - test if device is dormant
1670 * @dev: network device
1671 *
1672 * Check if the device is in the dormant state (see RFC2863).
1673 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08001674static inline int netif_dormant(const struct net_device *dev)
1675{
1676 return test_bit(__LINK_STATE_DORMANT, &dev->state);
1677}
1678
1679
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001680/**
1681 * netif_oper_up - test if device is operational
1682 * @dev: network device
1683 *
1684 * Check if the device's RFC2863 operational state is up.
1685 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001686static inline int netif_oper_up(const struct net_device *dev)
1687{
Stefan Rompfb00055a2006-03-20 17:09:11 -08001688 return (dev->operstate == IF_OPER_UP ||
1689 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
1690}
1691
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001692/**
1693 * netif_device_present - is device available or removed
1694 * @dev: network device
1695 *
1696 * Check if device has not been removed from system.
1697 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698static inline int netif_device_present(struct net_device *dev)
1699{
1700 return test_bit(__LINK_STATE_PRESENT, &dev->state);
1701}
1702
Denis Vlasenko56079432006-03-29 15:57:29 -08001703extern void netif_device_detach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Denis Vlasenko56079432006-03-29 15:57:29 -08001705extern void netif_device_attach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
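/*
 * Example (sketch): these are normally called from a driver's
 * suspend/resume (or hot-unplug) paths, e.g. in a PCI driver
 * (mydrv_suspend is hypothetical):
 *
 *	static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		... stop hardware, save state ...
 *		return 0;
 *	}
 *
 * with a matching netif_device_attach(dev) in the resume callback,
 * which also restarts the transmit queue if the interface is running.
 */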
1707/*
1708 * Network interface message level settings
1709 */
1710#define HAVE_NETIF_MSG 1
1711
1712enum {
1713 NETIF_MSG_DRV = 0x0001,
1714 NETIF_MSG_PROBE = 0x0002,
1715 NETIF_MSG_LINK = 0x0004,
1716 NETIF_MSG_TIMER = 0x0008,
1717 NETIF_MSG_IFDOWN = 0x0010,
1718 NETIF_MSG_IFUP = 0x0020,
1719 NETIF_MSG_RX_ERR = 0x0040,
1720 NETIF_MSG_TX_ERR = 0x0080,
1721 NETIF_MSG_TX_QUEUED = 0x0100,
1722 NETIF_MSG_INTR = 0x0200,
1723 NETIF_MSG_TX_DONE = 0x0400,
1724 NETIF_MSG_RX_STATUS = 0x0800,
1725 NETIF_MSG_PKTDATA = 0x1000,
1726 NETIF_MSG_HW = 0x2000,
1727 NETIF_MSG_WOL = 0x4000,
1728};
1729
1730#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
1731#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
1732#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
1733#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
1734#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
1735#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
1736#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
1737#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
1738#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
1739#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
1740#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
1741#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
1742#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
1743#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
1744#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
1745
1746static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
1747{
1748 /* use default */
1749 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
1750 return default_msg_enable_bits;
1751 if (debug_value == 0) /* no output */
1752 return 0;
1753 /* set low N bits */
1754 return (1 << debug_value) - 1;
1755}
1756
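/*
 * Example (sketch): a driver typically seeds msg_enable from a module
 * parameter in its probe routine and then gates its messages on the
 * netif_msg_*() helpers ('debug' and priv are hypothetical):
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *				NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *				NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */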
David S. Millerc773e842008-07-08 23:13:53 -07001757static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
Herbert Xu932ff272006-06-09 12:20:56 -07001758{
David S. Millerc773e842008-07-08 23:13:53 -07001759 spin_lock(&txq->_xmit_lock);
1760 txq->xmit_lock_owner = cpu;
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07001761}
1762
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001763static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
1764{
1765 spin_lock_bh(&txq->_xmit_lock);
1766 txq->xmit_lock_owner = smp_processor_id();
1767}
1768
David S. Millerc773e842008-07-08 23:13:53 -07001769static inline int __netif_tx_trylock(struct netdev_queue *txq)
1770{
1771 int ok = spin_trylock(&txq->_xmit_lock);
1772 if (likely(ok))
1773 txq->xmit_lock_owner = smp_processor_id();
1774 return ok;
Herbert Xu932ff272006-06-09 12:20:56 -07001775}
1776
David S. Millerc773e842008-07-08 23:13:53 -07001777static inline void __netif_tx_unlock(struct netdev_queue *txq)
1778{
1779 txq->xmit_lock_owner = -1;
1780 spin_unlock(&txq->_xmit_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07001781}
1782
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001783static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
1784{
1785 txq->xmit_lock_owner = -1;
1786 spin_unlock_bh(&txq->_xmit_lock);
1787}
1788
Eric Dumazet08baf562009-05-25 22:58:01 -07001789static inline void txq_trans_update(struct netdev_queue *txq)
1790{
1791 if (txq->xmit_lock_owner != -1)
1792 txq->trans_start = jiffies;
1793}
1794
David S. Millerc3f26a22008-07-31 16:58:50 -07001795/**
1796 * netif_tx_lock - grab network device transmit lock
1797 * @dev: network device
David S. Millerc3f26a22008-07-31 16:58:50 -07001798 *
1799 * Get network device transmit lock
1800 */
1801static inline void netif_tx_lock(struct net_device *dev)
1802{
1803 unsigned int i;
1804 int cpu;
1805
1806 spin_lock(&dev->tx_global_lock);
1807 cpu = smp_processor_id();
1808 for (i = 0; i < dev->num_tx_queues; i++) {
1809 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1810
1811 /* We are the only thread of execution doing a
1812 * freeze, but we have to grab the _xmit_lock in
1813 * order to synchronize with threads which are in
1814 * the ->hard_start_xmit() handler and already
1815 * checked the frozen bit.
1816 */
1817 __netif_tx_lock(txq, cpu);
1818 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
1819 __netif_tx_unlock(txq);
1820 }
1821}
1822
1823static inline void netif_tx_lock_bh(struct net_device *dev)
1824{
1825 local_bh_disable();
1826 netif_tx_lock(dev);
1827}
1828
Herbert Xu932ff272006-06-09 12:20:56 -07001829static inline void netif_tx_unlock(struct net_device *dev)
1830{
David S. Millere8a04642008-07-17 00:34:19 -07001831 unsigned int i;
David S. Millerc773e842008-07-08 23:13:53 -07001832
David S. Millere8a04642008-07-17 00:34:19 -07001833 for (i = 0; i < dev->num_tx_queues; i++) {
1834 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millere8a04642008-07-17 00:34:19 -07001835
David S. Millerc3f26a22008-07-31 16:58:50 -07001836 /* No need to grab the _xmit_lock here. If the
1837 * queue is not stopped for another reason, we
1838 * force a schedule.
1839 */
1840 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001841 netif_schedule_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07001842 }
1843 spin_unlock(&dev->tx_global_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07001844}
1845
1846static inline void netif_tx_unlock_bh(struct net_device *dev)
1847{
David S. Millere8a04642008-07-17 00:34:19 -07001848 netif_tx_unlock(dev);
1849 local_bh_enable();
Herbert Xu932ff272006-06-09 12:20:56 -07001850}
1851
David S. Millerc773e842008-07-08 23:13:53 -07001852#define HARD_TX_LOCK(dev, txq, cpu) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07001853 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07001854 __netif_tx_lock(txq, cpu); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07001855 } \
1856}
1857
David S. Millerc773e842008-07-08 23:13:53 -07001858#define HARD_TX_UNLOCK(dev, txq) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07001859 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07001860 __netif_tx_unlock(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07001861 } \
1862}
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864static inline void netif_tx_disable(struct net_device *dev)
1865{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001866 unsigned int i;
David S. Millerc3f26a22008-07-31 16:58:50 -07001867 int cpu;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001868
David S. Millerc3f26a22008-07-31 16:58:50 -07001869 local_bh_disable();
1870 cpu = smp_processor_id();
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001871 for (i = 0; i < dev->num_tx_queues; i++) {
1872 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millerc3f26a22008-07-31 16:58:50 -07001873
1874 __netif_tx_lock(txq, cpu);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001875 netif_tx_stop_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07001876 __netif_tx_unlock(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001877 }
David S. Millerc3f26a22008-07-31 16:58:50 -07001878 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879}
1880
David S. Millere308a5d2008-07-15 00:13:44 -07001881static inline void netif_addr_lock(struct net_device *dev)
1882{
1883 spin_lock(&dev->addr_list_lock);
1884}
1885
1886static inline void netif_addr_lock_bh(struct net_device *dev)
1887{
1888 spin_lock_bh(&dev->addr_list_lock);
1889}
1890
1891static inline void netif_addr_unlock(struct net_device *dev)
1892{
1893 spin_unlock(&dev->addr_list_lock);
1894}
1895
1896static inline void netif_addr_unlock_bh(struct net_device *dev)
1897{
1898 spin_unlock_bh(&dev->addr_list_lock);
1899}
1900
Jiri Pirkof001fde2009-05-05 02:48:28 +00001901/*
Jiri Pirko31278e72009-06-17 01:12:19 +00001902 * dev_addrs walker. Should be used only for read access. Call with
Jiri Pirkof001fde2009-05-05 02:48:28 +00001903 * rcu_read_lock held.
1904 */
1905#define for_each_dev_addr(dev, ha) \
Jiri Pirko31278e72009-06-17 01:12:19 +00001906 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00001907
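/*
 * Example (sketch): dumping every hardware address attached to a
 * device; the %pM printk body is only for illustration.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		printk(KERN_DEBUG "%s: %pM\n", dev->name, ha->addr);
 *	rcu_read_unlock();
 */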
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908/* These functions live elsewhere (drivers/net/net_init.c), but are related. */
1909
1910extern void ether_setup(struct net_device *dev);
1911
1912/* Support for loadable net-drivers */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001913extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
1914 void (*setup)(struct net_device *),
1915 unsigned int queue_count);
1916#define alloc_netdev(sizeof_priv, name, setup) \
1917 alloc_netdev_mq(sizeof_priv, name, setup, 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918extern int register_netdev(struct net_device *dev);
1919extern void unregister_netdev(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00001920
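/*
 * Example (sketch): the usual allocate/register/unregister/free
 * lifecycle around these helpers (struct mydrv_priv is hypothetical):
 *
 *	dev = alloc_netdev(sizeof(struct mydrv_priv), "mydev%d",
 *			   ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * register_netdev() takes and releases the RTNL itself; use
 * register_netdevice() when the caller already holds it.
 */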
1921/* Functions used for device addresses handling */
1922extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
1923 unsigned char addr_type);
1924extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
1925 unsigned char addr_type);
1926extern int dev_addr_add_multiple(struct net_device *to_dev,
1927 struct net_device *from_dev,
1928 unsigned char addr_type);
1929extern int dev_addr_del_multiple(struct net_device *to_dev,
1930 struct net_device *from_dev,
1931 unsigned char addr_type);
1932
Patrick McHardy4417da62007-06-27 01:28:10 -07001933/* Functions used for secondary unicast and multicast support */
1934extern void dev_set_rx_mode(struct net_device *dev);
1935extern void __dev_set_rx_mode(struct net_device *dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00001936extern int dev_unicast_delete(struct net_device *dev, void *addr);
1937extern int dev_unicast_add(struct net_device *dev, void *addr);
Chris Leeche83a2ea2008-01-31 16:53:23 -08001938extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
1939extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
1941extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
Patrick McHardya0a400d2007-07-14 18:52:02 -07001942extern int dev_mc_sync(struct net_device *to, struct net_device *from);
1943extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07001944extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
1945extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
Chris Leeche83a2ea2008-01-31 16:53:23 -08001946extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
1947extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
Wang Chendad9b332008-06-18 01:48:28 -07001948extern int dev_set_promiscuity(struct net_device *dev, int inc);
1949extern int dev_set_allmulti(struct net_device *dev, int inc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950extern void netdev_state_change(struct net_device *dev);
Moni Shoua75c78502009-09-15 02:37:40 -07001951extern void netdev_bonding_change(struct net_device *dev,
1952 unsigned long event);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001953extern void netdev_features_change(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954/* Load a device via the kmod */
Eric W. Biederman881d9662007-09-17 11:56:21 -07001955extern void dev_load(struct net *net, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956extern void dev_mcast_init(void);
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08001957extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
Eric Dumazetd83345a2009-11-16 03:36:51 +00001958extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08001959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960extern int netdev_max_backlog;
1961extern int weight_p;
1962extern int netdev_set_master(struct net_device *dev, struct net_device *master);
Patrick McHardy84fa7932006-08-29 16:44:56 -07001963extern int skb_checksum_help(struct sk_buff *skb);
Herbert Xu576a30e2006-06-27 13:22:38 -07001964extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
Herbert Xufb286bb2005-11-10 13:01:24 -08001965#ifdef CONFIG_BUG
1966extern void netdev_rx_csum_fault(struct net_device *dev);
1967#else
1968static inline void netdev_rx_csum_fault(struct net_device *dev)
1969{
1970}
1971#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972/* rx skb timestamps */
1973extern void net_enable_timestamp(void);
1974extern void net_disable_timestamp(void);
1975
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001976#ifdef CONFIG_PROC_FS
1977extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
1978extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1979extern void dev_seq_stop(struct seq_file *seq, void *v);
1980#endif
1981
Jay Vosburghb8a97872008-06-13 18:12:04 -07001982extern int netdev_class_create_file(struct class_attribute *class_attr);
1983extern void netdev_class_remove_file(struct class_attribute *class_attr);
1984
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001985extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
Arjan van de Ven6579e572008-07-21 13:31:48 -07001986
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001987extern void linkwatch_run_queue(void);
1988
Herbert Xub63365a2008-10-23 01:11:29 -07001989unsigned long netdev_increment_features(unsigned long all, unsigned long one,
1990 unsigned long mask);
1991unsigned long netdev_fix_features(unsigned long features, const char *name);
Herbert Xu7f353bf2007-08-10 15:47:58 -07001992
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08001993void netif_stacked_transfer_operstate(const struct net_device *rootdev,
1994 struct net_device *dev);
1995
Herbert Xubcd76112006-06-30 13:36:35 -07001996static inline int net_gso_ok(int features, int gso_type)
1997{
1998 int feature = gso_type << NETIF_F_GSO_SHIFT;
1999 return (features & feature) == feature;
2000}
2001
Herbert Xu576a30e2006-06-27 13:22:38 -07002002static inline int skb_gso_ok(struct sk_buff *skb, int features)
2003{
Herbert Xu278b2512009-06-03 21:20:51 -07002004 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
David S. Millera5bd8a12009-06-09 00:17:27 -07002005 (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
Herbert Xu576a30e2006-06-27 13:22:38 -07002006}
2007
Herbert Xu79671682006-06-22 02:40:14 -07002008static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
2009{
Herbert Xua430a432006-07-08 13:34:56 -07002010 return skb_is_gso(skb) &&
2011 (!skb_gso_ok(skb, dev->features) ||
Patrick McHardy84fa7932006-08-29 16:44:56 -07002012 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
Herbert Xu79671682006-06-22 02:40:14 -07002013}
2014
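/*
 * Example (sketch, loosely modelled on the core transmit path): a
 * caller that cannot hand a GSO skb to the device unsegmented would do
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		... transmit each skb on the segs list ...
 *	}
 */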
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002015static inline void netif_set_gso_max_size(struct net_device *dev,
2016 unsigned int size)
2017{
2018 dev->gso_max_size = size;
2019}
2020
Jiri Pirko5d4e0392009-05-28 01:05:00 +00002021static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2022 struct net_device *master)
2023{
2024 if (skb->pkt_type == PACKET_HOST) {
2025 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2026
2027 memcpy(dest, master->dev_addr, ETH_ALEN);
2028 }
2029}
2030
David S. Miller7ea49ed2006-08-14 17:08:36 -07002031/* On bonding slaves other than the currently active slave, suppress
Jay Vosburghf5b2b962006-09-22 21:54:53 -07002032 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2033 * ARP on active-backup slaves with arp_validate enabled.
David S. Miller7ea49ed2006-08-14 17:08:36 -07002034 */
2035static inline int skb_bond_should_drop(struct sk_buff *skb)
2036{
2037 struct net_device *dev = skb->dev;
2038 struct net_device *master = dev->master;
2039
Jay Vosburgh6cf3f412008-11-03 18:16:50 -08002040 if (master) {
2041 if (master->priv_flags & IFF_MASTER_ARPMON)
2042 dev->last_rx = jiffies;
Jay Vosburghf5b2b962006-09-22 21:54:53 -07002043
Jiri Pirko5d4e0392009-05-28 01:05:00 +00002044 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2045 /* Unmangle the address: the local destination address
2046 * will always be the one the master has.  This provides
2047 * the right functionality in a bridge.
2048 */
2049 skb_bond_set_mac_by_master(skb, master);
2050 }
2051
Jay Vosburgh6cf3f412008-11-03 18:16:50 -08002052 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2053 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
Harvey Harrisonf3a7c662009-02-14 22:58:35 -08002054 skb->protocol == __cpu_to_be16(ETH_P_ARP))
David S. Miller7ea49ed2006-08-14 17:08:36 -07002055 return 0;
David S. Miller7ea49ed2006-08-14 17:08:36 -07002056
Jay Vosburgh6cf3f412008-11-03 18:16:50 -08002057 if (master->priv_flags & IFF_MASTER_ALB) {
2058 if (skb->pkt_type != PACKET_BROADCAST &&
2059 skb->pkt_type != PACKET_MULTICAST)
2060 return 0;
2061 }
2062 if (master->priv_flags & IFF_MASTER_8023AD &&
Harvey Harrisonf3a7c662009-02-14 22:58:35 -08002063 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
Jay Vosburgh6cf3f412008-11-03 18:16:50 -08002064 return 0;
2065
2066 return 1;
2067 }
David S. Miller7ea49ed2006-08-14 17:08:36 -07002068 }
2069 return 0;
2070}
2071
Eric W. Biederman505d4f72008-11-07 22:54:20 -08002072extern struct pernet_operations __net_initdata loopback_net_ops;
Patrick McHardyb1b67dd2009-04-20 04:49:28 +00002073
2074static inline int dev_ethtool_get_settings(struct net_device *dev,
2075 struct ethtool_cmd *cmd)
2076{
2077 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
2078 return -EOPNOTSUPP;
2079 return dev->ethtool_ops->get_settings(dev, cmd);
2080}
2081
2082static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
2083{
2084 if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
2085 return 0;
2086 return dev->ethtool_ops->get_rx_csum(dev);
2087}
2088
2089static inline u32 dev_ethtool_get_flags(struct net_device *dev)
2090{
2091 if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
2092 return 0;
2093 return dev->ethtool_ops->get_flags(dev);
2094}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095#endif /* __KERNEL__ */
2096
Jiri Pirko385a1542009-05-27 15:48:07 -07002097#endif /* _LINUX_NETDEVICE_H */