blob: 79358bb712c6905e474a55b09b1edae58c3a5c43 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Common code for low-level network console, dump, and debugger code
3 *
4 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5 */
6
7#ifndef _LINUX_NETPOLL_H
8#define _LINUX_NETPOLL_H
9
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
Matt Mackall53fb95d2005-08-11 19:27:43 -070012#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/list.h>
14
/*
 * One netpoll client instance (e.g. a netconsole target): names the
 * device to bind to and the UDP/IPv4 endpoints of the out-of-band
 * channel.
 */
struct netpoll {
	struct net_device *dev;		/* device this netpoll is attached to */
	char dev_name[IFNAMSIZ];	/* interface name to bind to */
	const char *name;		/* client name — presumably for logging; confirm */
	/* called from the poll path to hand received payload to the client */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 addresses, network byte order (__be32) */
	u16 local_port, remote_port;	/* UDP ports — byte order not evident here; TODO confirm */
	u8 remote_mac[ETH_ALEN];	/* peer Ethernet address */

	struct list_head rx; /* rx_np list element */
};
27
/*
 * Per-net_device netpoll state, shared by all netpoll clients attached
 * to the same device (reached via dev->npinfo).
 */
struct netpoll_info {
	atomic_t refcnt;	/* reference count — presumably attached clients; confirm */

	int rx_flags;		/* nonzero when rx interception is armed (checked in netpoll_rx) */
	spinlock_t rx_lock;	/* serializes the rx path; held around __netpoll_rx() */
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;	/* queued skbs — presumably deferred transmits; confirm */

	struct delayed_work tx_work;	/* NOTE(review): likely drains txq from process context — confirm */

	struct netpoll *netpoll;
};
42
WANG Cong0e34e932010-05-06 00:47:21 -070043void netpoll_poll_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044void netpoll_poll(struct netpoll *np);
45void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
Satyam Sharma0bcc1812007-08-10 15:35:05 -070046void netpoll_print_options(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047int netpoll_parse_options(struct netpoll *np, char *opt);
Herbert Xu8fdd95e2010-06-10 16:12:48 +000048int __netpoll_setup(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070049int netpoll_setup(struct netpoll *np);
50int netpoll_trap(void);
51void netpoll_set_trap(int trap);
Herbert Xu8fdd95e2010-06-10 16:12:48 +000052void __netpoll_cleanup(struct netpoll *np);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053void netpoll_cleanup(struct netpoll *np);
54int __netpoll_rx(struct sk_buff *skb);
Neil Hormanc2355e12010-10-13 16:01:49 +000055void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
56 struct net_device *dev);
57static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
58{
59 netpoll_send_skb_on_dev(np, skb, np->dev);
60}
61
Stephen Hemminger5de4a472006-10-26 15:46:55 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64#ifdef CONFIG_NETPOLL
/*
 * Give netpoll a chance to consume an incoming skb.
 * Returns true when netpoll handled the packet — the caller should
 * presumably stop normal processing of it; confirm against callers.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	/*
	 * NOTE(review): interrupts (not just BH) are disabled here before
	 * taking rx_lock — presumably because the netpoll paths can run
	 * with irqs off; confirm the exact deadlock scenario this avoids.
	 */
	local_irq_save(flags);
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	/* Fast path: no netpoll state on this device, or rx not armed. */
	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
87
Herbert Xud1c76af2009-03-16 10:50:02 -070088static inline int netpoll_rx_on(struct sk_buff *skb)
89{
Herbert Xud5f31fb2010-06-15 21:44:29 -070090 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
Herbert Xud1c76af2009-03-16 10:50:02 -070091
Daniel Borkmann508e14b2010-01-12 14:27:30 +000092 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
Herbert Xud1c76af2009-03-16 10:50:02 -070093}
94
Stephen Hemmingerbea33482007-10-03 16:41:36 -070095static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070096{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070097 if (!list_empty(&skb->dev->napi_list))
98 return netpoll_rx(skb);
99 return 0;
100}
101
102static inline void *netpoll_poll_lock(struct napi_struct *napi)
103{
104 struct net_device *dev = napi->dev;
105
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700106 if (dev && dev->npinfo) {
107 spin_lock(&napi->poll_lock);
108 napi->poll_owner = smp_processor_id();
109 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700111 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112}
113
Matt Mackall53fb95d2005-08-11 19:27:43 -0700114static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700116 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700117
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700118 if (napi) {
119 napi->poll_owner = -1;
120 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 }
122}
123
/*
 * Heuristic for "a netpoll transmit is in progress on this CPU":
 * NOTE(review): relies on the netpoll tx path running with local irqs
 * disabled, so any irq-off region reads as true — confirm against
 * netpoll_send_skb_on_dev(). @dev is unused.
 */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
128
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129#else
/* !CONFIG_NETPOLL stub: netpoll never consumes a packet. */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	/* Use the bool literal rather than 0 to match the bool return type. */
	return false;
}
/* !CONFIG_NETPOLL stub: rx interception is never active. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: never consumes the skb. */
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: nothing to lock; returns a NULL cookie. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
/* !CONFIG_NETPOLL stub: nothing to unlock. */
static inline void netpoll_poll_unlock(void *have)
{
}
/* !CONFIG_NETPOLL stub: no per-device netpoll initialization needed. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
/* !CONFIG_NETPOLL stub: a netpoll transmit is never in progress. */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156#endif
157
158#endif