blob: 2524267210d308894317f00e057c0d61bf81098d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Common code for low-level network console, dump, and debugger code
3 *
4 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5 */
6
7#ifndef _LINUX_NETPOLL_H
8#define _LINUX_NETPOLL_H
9
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
Matt Mackall53fb95d2005-08-11 19:27:43 -070012#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/list.h>
14
/*
 * One netpoll client instance (e.g. a netconsole channel): names the
 * device to use and the UDP endpoints of the point-to-point link.
 */
struct netpoll {
	struct net_device *dev;		/* bound device; NOTE(review): presumably set by netpoll_setup() — confirm */
	char dev_name[IFNAMSIZ];	/* device name, parsed from the options string */
	const char *name;		/* client name used in diagnostics */
	/* optional receive callback for packets matched by __netpoll_rx() */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 addresses, network byte order */
	u16 local_port, remote_port;	/* UDP ports; plain u16, so host byte order */
	u8 remote_mac[ETH_ALEN];	/* MAC of the remote endpoint */
};
25
/*
 * Per-net_device netpoll state, shared by every netpoll client
 * attached to the device (hung off net_device->npinfo).
 */
struct netpoll_info {
	atomic_t refcnt;		/* number of attached netpoll clients */
	int rx_flags;			/* nonzero when rx interception is enabled */
	spinlock_t rx_lock;		/* serializes rx state against netpoll_rx() */
	struct netpoll *rx_np;		/* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
	struct sk_buff_head txq;	/* skbs queued when immediate tx was not possible */
	struct delayed_work tx_work;	/* presumably drains txq later — see netpoll.c */
};
35
/*
 * Core netpoll API.  netpoll_parse_options()/netpoll_setup() configure
 * and attach an instance; netpoll_cleanup() detaches it;
 * netpoll_send_udp() transmits a message over the configured link.
 */
void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
Stephen Hemminger5de4a472006-10-26 15:46:55 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#ifdef CONFIG_NETPOLL
/*
 * netpoll_rx - offer a received skb to netpoll.
 *
 * Returns 1 when netpoll consumed the packet (the caller must not
 * process it further), 0 when the normal receive path should run.
 *
 * The first rx_np/rx_flags test is an unlocked fast path; rx_flags is
 * re-checked under rx_lock before __netpoll_rx() is invoked, so the
 * slow path cannot race with rx state being torn down.
 */
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	/* fast path: no netpoll on this device, or rx interception off */
	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}
65
Herbert Xud1c76af2009-03-16 10:50:02 -070066static inline int netpoll_rx_on(struct sk_buff *skb)
67{
68 struct netpoll_info *npinfo = skb->dev->npinfo;
69
70 return npinfo && (npinfo->rx_np || npinfo->rx_flags);
71}
72
Stephen Hemmingerbea33482007-10-03 16:41:36 -070073static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070074{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070075 if (!list_empty(&skb->dev->napi_list))
76 return netpoll_rx(skb);
77 return 0;
78}
79
80static inline void *netpoll_poll_lock(struct napi_struct *napi)
81{
82 struct net_device *dev = napi->dev;
83
Matt Mackall53fb95d2005-08-11 19:27:43 -070084 rcu_read_lock(); /* deal with race on ->npinfo */
Stephen Hemmingerbea33482007-10-03 16:41:36 -070085 if (dev && dev->npinfo) {
86 spin_lock(&napi->poll_lock);
87 napi->poll_owner = smp_processor_id();
88 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -070089 }
Matt Mackall53fb95d2005-08-11 19:27:43 -070090 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070091}
92
Matt Mackall53fb95d2005-08-11 19:27:43 -070093static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -070094{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070095 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -070096
Stephen Hemmingerbea33482007-10-03 16:41:36 -070097 if (napi) {
98 napi->poll_owner = -1;
99 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700101 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102}
103
104#else
/*
 * !CONFIG_NETPOLL: no-op stubs so callers compile without ifdefs.
 * The rx helpers report "not consumed"; the lock helpers do nothing.
 */
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
/*
 * NOTE(review): netpoll_netdev_init() has a stub here but no
 * CONFIG_NETPOLL declaration in this header — presumably provided
 * elsewhere when netpoll is enabled; verify against netpoll.c.
 */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127#endif
128
129#endif