blob: fbfdb9d8d3a7f59b4788bdfd1cdb26c86f0a81ca [file] [log] [blame]
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
6
7#ifndef _LINUX_NETPOLL_H
8#define _LINUX_NETPOLL_H
9
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
Matt Mackall53fb95d2005-08-11 19:27:43 -070012#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/list.h>
14
/*
 * Address storage shared by the IPv4 and IPv6 netpoll paths.  The
 * union overlays a single IPv4 address (ip/in) with a 128-bit IPv6
 * address (ip6/in6); all[] exposes the raw words of either family.
 */
union inet_addr {
	__u32		all[4];
	__be32		ip;
	__be32		ip6[4];
	struct in_addr	in;
	struct in6_addr	in6;
};
22
/*
 * Per-client netpoll instance (e.g. one netconsole target).  Endpoint
 * fields are filled from the option string by netpoll_parse_options()
 * and the device binding is established by netpoll_setup().
 */
struct netpoll {
	struct net_device *dev;		/* device this netpoll is bound to */
	char dev_name[IFNAMSIZ];	/* name of the device to bind to */
	const char *name;		/* client-supplied identifier */
	/*
	 * Optional receive hook; netpolls that register one are linked
	 * on netpoll_info::rx_np via the rx list element below.
	 */
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);

	union inet_addr local_ip, remote_ip;	/* endpoint addresses */
	bool ipv6;				/* addresses above are IPv6 */
	u16 local_port, remote_port;		/* UDP ports (host order) */
	u8 remote_mac[ETH_ALEN];		/* destination MAC */

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work;	/* async teardown (__netpoll_free_async) */
};
38
/*
 * Per-device netpoll state, reached via net_device::npinfo under RCU
 * (see the rcu_dereference_bh() calls below) and shared by all
 * netpolls attached to the device.
 */
struct netpoll_info {
	atomic_t refcnt;	/* presumably one ref per attached netpoll -- confirm */

	unsigned long rx_flags;	/* rx-enabled state; re-checked under rx_lock */
	spinlock_t rx_lock;	/* serializes the rx path (see netpoll_rx()) */
	struct semaphore dev_lock;	/* NOTE(review): purpose not visible here -- confirm in net/core/netpoll.c */
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */

	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* skbs queued for deferred transmit */

	struct delayed_work tx_work;	/* presumably drains txq -- confirm */

	struct netpoll *netpoll;
	struct rcu_head rcu;	/* deferred free of this structure */
};
55
/*
 * RX disable/enable entry points: implemented out of line when
 * CONFIG_NETPOLL is set, and compiled away to no-ops otherwise.
 */
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline void netpoll_rx_disable(struct net_device *dev) {}
static inline void netpoll_rx_enable(struct net_device *dev) {}
#endif
63
/*
 * Out-of-line netpoll API.  The double-underscore variants are the
 * raw versions; presumably the caller provides the locking/reference
 * guarantees the plain versions set up themselves -- confirm against
 * the implementations.
 */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
77static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
78{
Amerigo Wang28996562012-08-10 01:24:42 +000079 unsigned long flags;
80 local_irq_save(flags);
Neil Hormanc2355e12010-10-13 16:01:49 +000081 netpoll_send_skb_on_dev(np, skb, np->dev);
Amerigo Wang28996562012-08-10 01:24:42 +000082 local_irq_restore(flags);
Neil Hormanc2355e12010-10-13 16:01:49 +000083}
84
Stephen Hemminger5de4a472006-10-26 15:46:55 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
87#ifdef CONFIG_NETPOLL
Amerigo Wang77ab8a52012-08-10 01:24:46 +000088static inline bool netpoll_rx_on(struct sk_buff *skb)
Amerigo Wang91fe4a42012-08-10 01:24:41 +000089{
90 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
91
92 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
93}
94
/*
 * Offer @skb to netpoll; returns true when __netpoll_rx() accepted it.
 *
 * Ordering matters here: a lockless netpoll_rx_on() test first, then
 * rx_flags is re-checked with rx_lock held before handing the packet
 * to __netpoll_rx().
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	/*
	 * IRQs stay off for the whole sequence; the netpoll transmit
	 * path also runs with IRQs disabled (see netpoll_send_skb()).
	 */
	local_irq_save(flags);

	/* Cheap lockless test: most devices have no netpoll rx state. */
	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
117
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700118static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700120 if (!list_empty(&skb->dev->napi_list))
121 return netpoll_rx(skb);
122 return 0;
123}
124
125static inline void *netpoll_poll_lock(struct napi_struct *napi)
126{
127 struct net_device *dev = napi->dev;
128
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700129 if (dev && dev->npinfo) {
130 spin_lock(&napi->poll_lock);
131 napi->poll_owner = smp_processor_id();
132 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700134 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135}
136
Matt Mackall53fb95d2005-08-11 19:27:43 -0700137static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700139 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700140
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700141 if (napi) {
142 napi->poll_owner = -1;
143 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144 }
145}
146
/*
 * Heuristic "are we inside a netpoll transmit?" test.  Netpoll sends
 * run with local IRQs disabled (see netpoll_send_skb()), so disabled
 * IRQs are used as the marker; note this is also true in any other
 * IRQs-off context.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
151
#else
/*
 * CONFIG_NETPOLL is not set: every hook compiles to a trivial no-op
 * so callers in the core network stack need no #ifdefs of their own.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif
180
181#endif