/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

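/*
 * One netpoll instance, e.g. a netconsole target: the device and UDP
 * endpoints it uses, plus an optional rx_hook invoked for matching
 * received UDP packets.
 */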
struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];
	const char *name;
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;
	u16 local_port, remote_port;
	u8 remote_mac[ETH_ALEN];

	struct list_head rx; /* rx_np list element */
};

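/*
 * Per-device netpoll state, reached via net_device->npinfo and shared
 * by every netpoll instance attached to the device.  refcnt counts the
 * attached instances, rx_np lists those that registered an rx_hook,
 * and txq holds packets that the tx_work worker retries when the
 * device was busy at send time.
 */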
struct netpoll_info {
	atomic_t refcnt;

	int rx_flags;
	spinlock_t rx_lock;
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;

	struct delayed_work tx_work;
};

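/* Setup, transmit, and teardown entry points, implemented in net/core/netpoll.c. */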
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);

#ifdef CONFIG_NETPOLL
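/*
 * Hook for the receive path: returns 1 when netpoll consumed the
 * packet and normal processing must stop, 0 otherwise.  The unlocked
 * check up front keeps the common no-netpoll case cheap.
 */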
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}

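/* Lockless test for "does this device have any netpoll rx state at all?". */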
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;

	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}

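/*
 * Variant used on the netif_receive_skb() path; only devices with
 * registered NAPI contexts (a non-empty napi_list) are checked.
 */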
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}

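/*
 * netpoll_poll_lock()/netpoll_poll_unlock() bracket a NAPI poll so
 * that netpoll's own polling never runs a device's ->poll()
 * concurrently with the softirq path.  The returned cookie is handed
 * back to netpoll_poll_unlock(); NULL means the device has no netpoll
 * state and nothing was locked.
 */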
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	rcu_read_lock(); /* deal with race on ->npinfo */
	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
	rcu_read_unlock();
}
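
/*
 * Usage sketch, modelled on the pair's main caller, net_rx_action() in
 * net/core/dev.c ("work" and "weight" are local names there, shown
 * here only for illustration):
 *
 *	have = netpoll_poll_lock(napi);
 *	work = napi->poll(napi, weight);
 *	netpoll_poll_unlock(have);
 */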

#else
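/*
 * !CONFIG_NETPOLL: no-op stubs so callers can use the helpers above
 * unconditionally instead of wrapping each call site in #ifdefs.
 */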
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
#endif /* CONFIG_NETPOLL */

#endif /* _LINUX_NETPOLL_H */