/* blob: 1efe60c5c00c3dd5c57924707206afcf49ef2dc5 */
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll;

/*
 * Per-client netpoll endpoint state: one of these exists for each
 * netpoll user (netconsole, netdump, kgdb-over-ethernet) and records
 * the device plus the UDP/IP/MAC addressing of both ends.
 */
struct netpoll {
	struct net_device *dev;		/* device this client is attached to */
	char dev_name[16], *name;	/* interface name and client label */
	/*
	 * Optional receive callback.  The int/char*/int arguments are
	 * presumably port, payload pointer, and length — verify against
	 * __netpoll_rx() callers before relying on this.
	 */
	void (*rx_hook)(struct netpoll *, int, char *, int);
	void (*drop)(struct sk_buff *skb);	/* alternate disposal for an skb */
	u32 local_ip, remote_ip;		/* IPv4 addresses of both ends */
	u16 local_port, remote_port;		/* UDP ports of both ends */
	unsigned char local_mac[6], remote_mac[6];	/* Ethernet addresses */
};

/*
 * Per-device netpoll state, shared by all netpoll clients bound to the
 * same net_device (reached via dev->npinfo, see netpoll_rx() below).
 */
struct netpoll_info {
	spinlock_t poll_lock;	/* serializes device polling; see netpoll_poll_lock() */
	int poll_owner;		/* CPU id holding poll_lock, or -1 when unowned */
	int tries;		/* NOTE(review): meaning not visible in this header */
	int rx_flags;		/* nonzero when rx interception is active */
	spinlock_t rx_lock;	/* guards rx_flags/rx_np against concurrent rx */
	struct netpoll *rx_np;	/* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
};

/* Public netpoll entry points; definitions live outside this header. */
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_queue(struct sk_buff *skb);

47#ifdef CONFIG_NETPOLL
48static inline int netpoll_rx(struct sk_buff *skb)
49{
Jeff Moyer115c1d62005-06-22 22:05:31 -070050 struct netpoll_info *npinfo = skb->dev->npinfo;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070051 unsigned long flags;
52 int ret = 0;
Jeff Moyer115c1d62005-06-22 22:05:31 -070053
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070054 if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
Jeff Moyer115c1d62005-06-22 22:05:31 -070055 return 0;
56
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070057 spin_lock_irqsave(&npinfo->rx_lock, flags);
58 /* check rx_flags again with the lock held */
59 if (npinfo->rx_flags && __netpoll_rx(skb))
60 ret = 1;
61 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
62
63 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064}
65
Matt Mackall53fb95d2005-08-11 19:27:43 -070066static inline void *netpoll_poll_lock(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070067{
Matt Mackall53fb95d2005-08-11 19:27:43 -070068 rcu_read_lock(); /* deal with race on ->npinfo */
Jeff Moyer115c1d62005-06-22 22:05:31 -070069 if (dev->npinfo) {
70 spin_lock(&dev->npinfo->poll_lock);
71 dev->npinfo->poll_owner = smp_processor_id();
Matt Mackall53fb95d2005-08-11 19:27:43 -070072 return dev->npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -070073 }
Matt Mackall53fb95d2005-08-11 19:27:43 -070074 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070075}
76
Matt Mackall53fb95d2005-08-11 19:27:43 -070077static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -070078{
Matt Mackall53fb95d2005-08-11 19:27:43 -070079 struct netpoll_info *npi = have;
80
81 if (npi) {
82 npi->poll_owner = -1;
83 spin_unlock(&npi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 }
Matt Mackall53fb95d2005-08-11 19:27:43 -070085 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -070086}
87
#else
/* CONFIG_NETPOLL disabled: compile the hooks away to no-ops. */
#define netpoll_rx(a) 0
#define netpoll_poll_lock(a) NULL
#define netpoll_poll_unlock(a)
#endif

#endif /* _LINUX_NETPOLL_H */