/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll;

struct netpoll {
	struct net_device *dev;
	char dev_name[16], *name;
	void (*rx_hook)(struct netpoll *, int, char *, int);
	void (*drop)(struct sk_buff *skb);
	u32 local_ip, remote_ip;
	u16 local_port, remote_port;
	unsigned char local_mac[6], remote_mac[6];
};
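
/*
 * A minimal configuration sketch (the name, interface, ports, and MAC
 * below are hypothetical defaults in the style of a netconsole-like
 * client; clients usually let netpoll_parse_options() fill these fields
 * from a "port@ip/dev,port@ip/mac" style option string instead):
 *
 *	static struct netpoll example_np = {
 *		.name        = "example",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 */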

struct netpoll_info {
	spinlock_t poll_lock;
	int poll_owner;
	int tries;
	int rx_flags;
	spinlock_t rx_lock;
	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
};
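
/*
 * poll_lock and poll_owner serialize calls into a driver's ->poll()
 * routine between the normal receive softirq and netpoll itself;
 * poll_owner records which CPU currently holds poll_lock, so the
 * netpoll transmit path can avoid polling the device recursively from
 * that same CPU.  A rough sketch of such a check (illustrative only,
 * not the exact netpoll.c code):
 *
 *	if (npinfo->poll_owner != smp_processor_id())
 *		netpoll_poll(np);
 */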

void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_queue(struct sk_buff *skb);
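
/*
 * Typical client flow, continuing the hypothetical example_np sketch
 * above (the option string and message are made up; error handling is
 * reduced to the return-value checks):
 *
 *	if (netpoll_parse_options(&example_np, "6665@10.0.0.1/eth0,6666@10.0.0.2/") == 0 &&
 *	    netpoll_setup(&example_np) == 0)
 *		netpoll_send_udp(&example_np, "hello\n", 6);
 *
 *	...
 *	netpoll_cleanup(&example_np);
 */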

#ifdef CONFIG_NETPOLL
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}
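
/*
 * The core receive path consults netpoll_rx() before normal delivery,
 * so packets claimed by a registered rx_hook never reach the stack.
 * Roughly (a sketch of the caller's logic, not the exact dev.c code):
 *
 *	if (netpoll_rx(skb))
 *		return NET_RX_DROP;
 */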

static inline void *netpoll_poll_lock(struct net_device *dev)
{
	rcu_read_lock(); /* deal with race on ->npinfo */
	if (dev->npinfo) {
		spin_lock(&dev->npinfo->poll_lock);
		dev->npinfo->poll_owner = smp_processor_id();
		return dev->npinfo;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct netpoll_info *npi = have;

	if (npi) {
		npi->poll_owner = -1;
		spin_unlock(&npi->poll_lock);
	}
	rcu_read_unlock();
}
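
/*
 * netpoll_poll_lock()/netpoll_poll_unlock() bracket calls into a
 * driver's ->poll() routine so that netpoll and the receive softirq do
 * not run it concurrently.  Roughly (a sketch of the caller, not the
 * exact net_rx_action() code; "budget" stands for the usual quota
 * argument):
 *
 *	void *have = netpoll_poll_lock(dev);
 *	dev->poll(dev, &budget);
 *	netpoll_poll_unlock(have);
 */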

#else
#define netpoll_rx(a) 0
#define netpoll_poll_lock(a) NULL
#define netpoll_poll_unlock(a)
#endif

#endif