blob: e3d79593fb3a53186523071b4e22d6fbee47e79a [file] [log] [blame]
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
6
7#ifndef _LINUX_NETPOLL_H
8#define _LINUX_NETPOLL_H
9
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
Matt Mackall53fb95d2005-08-11 19:27:43 -070012#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/list.h>
14
/*
 * One netpoll client instance (e.g. a netconsole target).  Holds the
 * bound device plus the UDP endpoint parameters used by the client.
 */
struct netpoll {
	struct net_device *dev;		/* device this instance is bound to */
	char dev_name[IFNAMSIZ];	/* device name requested at setup time */
	const char *name;		/* client name — presumably used in log
					 * messages; confirm against users */
	/*
	 * Optional receive callback; when set (see netpoll_info->rx_np),
	 * matching received packets are handed to it via __netpoll_rx().
	 */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	u32 local_ip, remote_ip;	/* IPv4 endpoint addresses
					 * (byte order not visible here —
					 * NOTE(review): confirm in netpoll.c) */
	u16 local_port, remote_port;	/* UDP endpoint ports */
	u8 remote_mac[ETH_ALEN];	/* MAC for transmitted frames */
};
25
/*
 * Per-device netpoll state, reached via net_device->npinfo and shared
 * by all netpoll instances attached to that device.
 */
struct netpoll_info {
	atomic_t refcnt;		/* presumably counts attached netpoll
					 * instances — confirm in netpoll.c */
	int rx_flags;			/* nonzero => rx interception enabled;
					 * gates __netpoll_rx() (see netpoll_rx) */
	spinlock_t rx_lock;		/* serializes the rx interception path */
	struct netpoll *rx_np;		/* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
	struct sk_buff_head txq;	/* skbs queued for deferred transmit */
	struct delayed_work tx_work;	/* presumably drains txq — confirm */
};
35
/* Core netpoll API, implemented out of line (net/core/netpoll.c). */
void netpoll_poll(struct netpoll *np);		/* service np's device now */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);	/* log parsed configuration */
int netpoll_parse_options(struct netpoll *np, char *opt);	/* 0 on success */
int netpoll_setup(struct netpoll *np);		/* bind np; 0 on success */
int netpoll_trap(void);				/* query trap state */
void netpoll_set_trap(int trap);		/* enable/disable packet trap */
void netpoll_cleanup(struct netpoll *np);	/* undo netpoll_setup() */
int __netpoll_rx(struct sk_buff *skb);		/* rx core; callers hold
						 * npinfo->rx_lock (netpoll_rx) */
Stephen Hemminger5de4a472006-10-26 15:46:55 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#ifdef CONFIG_NETPOLL
48static inline int netpoll_rx(struct sk_buff *skb)
49{
Jeff Moyer115c1d62005-06-22 22:05:31 -070050 struct netpoll_info *npinfo = skb->dev->npinfo;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070051 unsigned long flags;
52 int ret = 0;
Jeff Moyer115c1d62005-06-22 22:05:31 -070053
David S. Millerd9452e92008-03-04 12:28:49 -080054 if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
Jeff Moyer115c1d62005-06-22 22:05:31 -070055 return 0;
56
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070057 spin_lock_irqsave(&npinfo->rx_lock, flags);
David S. Millerd9452e92008-03-04 12:28:49 -080058 /* check rx_flags again with the lock held */
59 if (npinfo->rx_flags && __netpoll_rx(skb))
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070060 ret = 1;
61 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
62
63 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064}
65
Stephen Hemmingerbea33482007-10-03 16:41:36 -070066static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070067{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070068 if (!list_empty(&skb->dev->napi_list))
69 return netpoll_rx(skb);
70 return 0;
71}
72
73static inline void *netpoll_poll_lock(struct napi_struct *napi)
74{
75 struct net_device *dev = napi->dev;
76
Matt Mackall53fb95d2005-08-11 19:27:43 -070077 rcu_read_lock(); /* deal with race on ->npinfo */
Stephen Hemmingerbea33482007-10-03 16:41:36 -070078 if (dev && dev->npinfo) {
79 spin_lock(&napi->poll_lock);
80 napi->poll_owner = smp_processor_id();
81 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 }
Matt Mackall53fb95d2005-08-11 19:27:43 -070083 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070084}
85
Matt Mackall53fb95d2005-08-11 19:27:43 -070086static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -070087{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070088 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -070089
Stephen Hemmingerbea33482007-10-03 16:41:36 -070090 if (napi) {
91 napi->poll_owner = -1;
92 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070093 }
Matt Mackall53fb95d2005-08-11 19:27:43 -070094 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -070095}
96
/* Initialize the per-device NAPI list so netpoll helpers can scan it. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
	INIT_LIST_HEAD(&dev->napi_list);
}
101
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102#else
/*
 * Stubs for !CONFIG_NETPOLL: every hook compiles away to a no-op so
 * callers need no #ifdefs of their own.
 */
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121#endif
122
123#endif