blob: 5dfa091c33478a0798369b8a911015b0e7c076bb [file] [log] [blame]
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
6
7#ifndef _LINUX_NETPOLL_H
8#define _LINUX_NETPOLL_H
9
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
Matt Mackall53fb95d2005-08-11 19:27:43 -070012#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/list.h>
14
/*
 * struct netpoll - per-client netpoll configuration.
 *
 * One instance per netpoll user (netconsole, kgdb-over-ethernet, ...),
 * describing the device and UDP endpoint the client talks through.
 */
struct netpoll {
	struct net_device *dev;		/* device this client is bound to */
	char dev_name[IFNAMSIZ];	/* device name, e.g. "eth0" */
	const char *name;		/* client name (used in messages) */
	/* Optional receive callback.
	 * NOTE(review): exact meaning of the int/char* arguments
	 * (presumably port, payload, length) must be confirmed against
	 * __netpoll_rx() in net/core/netpoll.c. */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 addresses, network byte order */
	u16 local_port, remote_port;	/* UDP ports -- byte order not shown here; TODO confirm */
	u8 remote_mac[ETH_ALEN];	/* peer MAC address */

	struct list_head rx; /* rx_np list element */
};
27
/*
 * struct netpoll_info - per-net_device netpoll state.
 *
 * Hangs off net_device->npinfo and is shared by all netpoll clients
 * attached to the same device; lifetime is managed via @refcnt.
 */
struct netpoll_info {
	atomic_t refcnt;		/* reference count */

	int rx_flags;			/* nonzero when rx interception is enabled */
	spinlock_t rx_lock;		/* serializes the rx path (see netpoll_rx()) */
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;	/* queued tx packets */

	/* presumably flushes @txq from process context -- see net/core/netpoll.c */
	struct delayed_work tx_work;

	struct netpoll *netpoll;
};
42
/*
 * Netpoll core entry points, implemented in net/core/netpoll.c.
 * NOTE(review): the double-underscore variants appear to assume
 * caller-provided locking (RTNL?) -- confirm against the .c file.
 */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
55static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
56{
57 netpoll_send_skb_on_dev(np, skb, np->dev);
58}
59
Stephen Hemminger5de4a472006-10-26 15:46:55 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061
62#ifdef CONFIG_NETPOLL
/*
 * netpoll_rx - give netpoll a chance to consume an incoming skb.
 *
 * Returns true when netpoll consumed the packet (the caller must not
 * process it further), false when normal rx processing should continue.
 *
 * NOTE(review): local IRQs (not just BH) are disabled across the lookup
 * and the handler -- presumably so this path cannot race the netpoll
 * poll/tx path on the same CPU; confirm against net/core/netpoll.c.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	/* Fast path: no netpoll state, or nobody interested in rx. */
	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
85
Herbert Xud1c76af2009-03-16 10:50:02 -070086static inline int netpoll_rx_on(struct sk_buff *skb)
87{
Herbert Xud5f31fb2010-06-15 21:44:29 -070088 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
Herbert Xud1c76af2009-03-16 10:50:02 -070089
Daniel Borkmann508e14b2010-01-12 14:27:30 +000090 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
Herbert Xud1c76af2009-03-16 10:50:02 -070091}
92
Stephen Hemmingerbea33482007-10-03 16:41:36 -070093static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070094{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070095 if (!list_empty(&skb->dev->napi_list))
96 return netpoll_rx(skb);
97 return 0;
98}
99
100static inline void *netpoll_poll_lock(struct napi_struct *napi)
101{
102 struct net_device *dev = napi->dev;
103
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700104 if (dev && dev->npinfo) {
105 spin_lock(&napi->poll_lock);
106 napi->poll_owner = smp_processor_id();
107 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700109 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110}
111
Matt Mackall53fb95d2005-08-11 19:27:43 -0700112static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700114 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700115
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700116 if (napi) {
117 napi->poll_owner = -1;
118 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119 }
120}
121
/*
 * netpoll_tx_running - heuristic for "a netpoll transmit may be in
 * progress on this CPU".  Presumably netpoll transmits with local IRQs
 * disabled, so this also reports true inside any unrelated irq-disabled
 * section -- TODO confirm against net/core/netpoll.c.
 */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127#else
/* !CONFIG_NETPOLL stub: netpoll never consumes packets. */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	/* Use the bool literal to match the return type (the
	 * CONFIG_NETPOLL version uses true/false as well). */
	return false;
}
/* !CONFIG_NETPOLL stub: rx interception is never active. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: nothing to intercept, always pass skb through. */
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: no poll lock to take; NULL cookie. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
/* !CONFIG_NETPOLL stub: nothing to release. */
static inline void netpoll_poll_unlock(void *have)
{
}
/* !CONFIG_NETPOLL stub: no per-device netpoll state to initialize.
 * NOTE(review): no CONFIG_NETPOLL counterpart is visible in this header. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
/* !CONFIG_NETPOLL stub: netpoll never transmits. */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154#endif
155
156#endif