blob: 791d5109f34c12207de65f06cca05fa4b35b44b8 [file] [log] [blame]
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
6
#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
14
Linus Torvalds1da177e2005-04-16 15:20:36 -070015struct netpoll {
16 struct net_device *dev;
WANG Cong0e34e932010-05-06 00:47:21 -070017 struct net_device *real_dev;
Stephen Hemmingerbf6bce72006-10-26 15:46:56 -070018 char dev_name[IFNAMSIZ];
19 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 void (*rx_hook)(struct netpoll *, int, char *, int);
Stephen Hemminger5de4a472006-10-26 15:46:55 -070021
Harvey Harrisone7557af2009-03-28 15:38:31 +000022 __be32 local_ip, remote_ip;
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 u16 local_port, remote_port;
Stephen Hemminger09538642007-11-19 19:23:29 -080024 u8 remote_mac[ETH_ALEN];
Daniel Borkmann508e14b2010-01-12 14:27:30 +000025
26 struct list_head rx; /* rx_np list element */
Jeff Moyer115c1d62005-06-22 22:05:31 -070027};
28
29struct netpoll_info {
Stephen Hemminger93ec2c72006-10-26 15:46:50 -070030 atomic_t refcnt;
Daniel Borkmann508e14b2010-01-12 14:27:30 +000031
David S. Millerd9452e92008-03-04 12:28:49 -080032 int rx_flags;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070033 spinlock_t rx_lock;
Daniel Borkmann508e14b2010-01-12 14:27:30 +000034 struct list_head rx_np; /* netpolls that registered an rx_hook */
35
Neil Horman068c6e92006-06-26 00:04:27 -070036 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
Stephen Hemmingerb6cd27e2006-10-26 15:46:51 -070037 struct sk_buff_head txq;
Daniel Borkmann508e14b2010-01-12 14:27:30 +000038
David Howells6d5aefb2006-12-05 19:36:26 +000039 struct delayed_work tx_work;
WANG Cong0e34e932010-05-06 00:47:21 -070040
41 struct netpoll *netpoll;
Linus Torvalds1da177e2005-04-16 15:20:36 -070042};
43
WANG Cong0e34e932010-05-06 00:47:21 -070044void netpoll_poll_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045void netpoll_poll(struct netpoll *np);
46void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
Satyam Sharma0bcc1812007-08-10 15:35:05 -070047void netpoll_print_options(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070048int netpoll_parse_options(struct netpoll *np, char *opt);
Herbert Xu8fdd95e2010-06-10 16:12:48 +000049int __netpoll_setup(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070050int netpoll_setup(struct netpoll *np);
51int netpoll_trap(void);
52void netpoll_set_trap(int trap);
Herbert Xu8fdd95e2010-06-10 16:12:48 +000053void __netpoll_cleanup(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070054void netpoll_cleanup(struct netpoll *np);
55int __netpoll_rx(struct sk_buff *skb);
WANG Cong0e34e932010-05-06 00:47:21 -070056void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
Stephen Hemminger5de4a472006-10-26 15:46:55 -070057

#ifdef CONFIG_NETPOLL
David S. Millerffb27362010-05-06 01:20:10 -070060static inline bool netpoll_rx(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Herbert Xude85d992010-06-10 16:12:44 +000062 struct netpoll_info *npinfo;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070063 unsigned long flags;
David S. Millerffb27362010-05-06 01:20:10 -070064 bool ret = false;
Jeff Moyer115c1d62005-06-22 22:05:31 -070065
Herbert Xude85d992010-06-10 16:12:44 +000066 rcu_read_lock_bh();
Herbert Xud5f31fb2010-06-15 21:44:29 -070067 npinfo = rcu_dereference_bh(skb->dev->npinfo);
Herbert Xude85d992010-06-10 16:12:44 +000068
Daniel Borkmann508e14b2010-01-12 14:27:30 +000069 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
Herbert Xude85d992010-06-10 16:12:44 +000070 goto out;
Jeff Moyer115c1d62005-06-22 22:05:31 -070071
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070072 spin_lock_irqsave(&npinfo->rx_lock, flags);
David S. Millerd9452e92008-03-04 12:28:49 -080073 /* check rx_flags again with the lock held */
74 if (npinfo->rx_flags && __netpoll_rx(skb))
David S. Millerffb27362010-05-06 01:20:10 -070075 ret = true;
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070076 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
77
Herbert Xude85d992010-06-10 16:12:44 +000078out:
79 rcu_read_unlock_bh();
Jeff Moyerfbeec2e2005-06-22 22:05:59 -070080 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070081}
82
Herbert Xud1c76af2009-03-16 10:50:02 -070083static inline int netpoll_rx_on(struct sk_buff *skb)
84{
Herbert Xud5f31fb2010-06-15 21:44:29 -070085 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
Herbert Xud1c76af2009-03-16 10:50:02 -070086
Daniel Borkmann508e14b2010-01-12 14:27:30 +000087 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
Herbert Xud1c76af2009-03-16 10:50:02 -070088}
89
Stephen Hemmingerbea33482007-10-03 16:41:36 -070090static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070091{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070092 if (!list_empty(&skb->dev->napi_list))
93 return netpoll_rx(skb);
94 return 0;
95}
96
97static inline void *netpoll_poll_lock(struct napi_struct *napi)
98{
99 struct net_device *dev = napi->dev;
100
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700101 if (dev && dev->npinfo) {
102 spin_lock(&napi->poll_lock);
103 napi->poll_owner = smp_processor_id();
104 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700106 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107}
108
Matt Mackall53fb95d2005-08-11 19:27:43 -0700109static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700111 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700112
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700113 if (napi) {
114 napi->poll_owner = -1;
115 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 }
117}
118
Herbert Xuc18370f2010-06-10 16:12:49 +0000119static inline int netpoll_tx_running(struct net_device *dev)
120{
121 return irqs_disabled();
122}
123
#else
John W. Linville969a6e52010-08-10 16:24:41 -0700125static inline bool netpoll_rx(struct sk_buff *skb)
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700126{
127 return 0;
128}
Herbert Xud1c76af2009-03-16 10:50:02 -0700129static inline int netpoll_rx_on(struct sk_buff *skb)
130{
131 return 0;
132}
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700133static inline int netpoll_receive_skb(struct sk_buff *skb)
134{
135 return 0;
136}
137static inline void *netpoll_poll_lock(struct napi_struct *napi)
138{
139 return NULL;
140}
141static inline void netpoll_poll_unlock(void *have)
142{
143}
144static inline void netpoll_netdev_init(struct net_device *dev)
145{
146}
Herbert Xuc18370f2010-06-10 16:12:49 +0000147static inline int netpoll_tx_running(struct net_device *dev)
148{
149 return 0;
150}
#endif

#endif