blob: 907812efb4d9befe18037ccd6bfe13c45b656862 [file] [log] [blame]
/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
6
7#ifndef _LINUX_NETPOLL_H
8#define _LINUX_NETPOLL_H
9
10#include <linux/netdevice.h>
11#include <linux/interrupt.h>
Matt Mackall53fb95d2005-08-11 19:27:43 -070012#include <linux/rcupdate.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/list.h>
14
/*
 * struct netpoll - one netpoll client endpoint
 *
 * Describes a single netpoll user's UDP endpoint (local/remote address,
 * port, and destination MAC) plus an optional receive hook.  Instances
 * that register an rx_hook are linked on netpoll_info::rx_np via @rx.
 */
struct netpoll {
	struct net_device *dev;		/* device this endpoint is bound to */
	char dev_name[IFNAMSIZ];	/* device name used at setup time */
	const char *name;		/* client name, for diagnostics */
	/* optional receive callback; registered hooks hang off
	 * netpoll_info::rx_np (see the rx list below) */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	__be32 local_ip, remote_ip;	/* IPv4 addresses, network byte order */
	u16 local_port, remote_port;	/* UDP ports */
	u8 remote_mac[ETH_ALEN];	/* destination MAC for transmitted frames */

	struct list_head rx;		/* rx_np list element */
	struct rcu_head rcu;		/* deferred free (see __netpoll_free_rcu) */
};
28
/*
 * struct netpoll_info - per-net_device netpoll state
 *
 * Reached via dev->npinfo under RCU (see netpoll_rx()) and shared by all
 * netpoll clients attached to the device; lifetime governed by @refcnt.
 */
struct netpoll_info {
	atomic_t refcnt;		/* number of attached netpoll clients */

	int rx_flags;			/* nonzero => rx processing enabled
					 * (checked twice in netpoll_rx()) */
	spinlock_t rx_lock;		/* serializes the rx path */
	struct list_head rx_np;		/* netpolls that registered an rx_hook */

	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
	struct sk_buff_head txq;	/* deferred transmit queue;
					 * NOTE(review): presumably drained by
					 * tx_work — confirm in net/core/netpoll.c */

	struct delayed_work tx_work;	/* deferred-transmit worker */

	struct netpoll *netpoll;
	struct rcu_head rcu;		/* deferred free of this structure */
};
44
/* Build and transmit a UDP packet carrying @msg/@len via @np's endpoint. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log @np's parsed configuration. */
void netpoll_print_options(struct netpoll *np);
/* Parse a netpoll option string into @np
 * (NOTE(review): exact format documented in net/core/netpoll.c). */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach @np to @ndev directly; netpoll_setup() is the full variant that
 * resolves np->dev_name itself. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Teardown: __netpoll_cleanup() is the low-level half; __netpoll_free_rcu()
 * frees via the struct's rcu head; netpoll_cleanup() is the full variant. */
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_rcu(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Slow-path rx; called by netpoll_rx() below with rx_lock held. */
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
58static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
59{
60 netpoll_send_skb_on_dev(np, skb, np->dev);
61}
62
Stephen Hemminger5de4a472006-10-26 15:46:55 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
65#ifdef CONFIG_NETPOLL
/*
 * netpoll_rx - give netpoll a chance to consume an incoming packet
 *
 * Returns true when netpoll handled the skb (per the double-checked
 * __netpoll_rx() call); presumably the caller must then not process it
 * further — confirm against callers in net/core/dev.c.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	/*
	 * Interrupts are disabled around the whole sequence and rx_lock is
	 * taken with a plain spin_lock().  NOTE(review): this assumes every
	 * rx_lock acquirer runs with irqs off — confirm in net/core/netpoll.c.
	 */
	local_irq_save(flags);
	npinfo = rcu_dereference_bh(skb->dev->npinfo);

	/* Fast path: no netpoll state on this device, or nobody wants rx. */
	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
		goto out;

	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
88
Herbert Xud1c76af2009-03-16 10:50:02 -070089static inline int netpoll_rx_on(struct sk_buff *skb)
90{
Herbert Xud5f31fb2010-06-15 21:44:29 -070091 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
Herbert Xud1c76af2009-03-16 10:50:02 -070092
Daniel Borkmann508e14b2010-01-12 14:27:30 +000093 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
Herbert Xud1c76af2009-03-16 10:50:02 -070094}
95
Stephen Hemmingerbea33482007-10-03 16:41:36 -070096static inline int netpoll_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Stephen Hemmingerbea33482007-10-03 16:41:36 -070098 if (!list_empty(&skb->dev->napi_list))
99 return netpoll_rx(skb);
100 return 0;
101}
102
103static inline void *netpoll_poll_lock(struct napi_struct *napi)
104{
105 struct net_device *dev = napi->dev;
106
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700107 if (dev && dev->npinfo) {
108 spin_lock(&napi->poll_lock);
109 napi->poll_owner = smp_processor_id();
110 return napi;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 }
Matt Mackall53fb95d2005-08-11 19:27:43 -0700112 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113}
114
Matt Mackall53fb95d2005-08-11 19:27:43 -0700115static inline void netpoll_poll_unlock(void *have)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700117 struct napi_struct *napi = have;
Matt Mackall53fb95d2005-08-11 19:27:43 -0700118
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700119 if (napi) {
120 napi->poll_owner = -1;
121 spin_unlock(&napi->poll_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122 }
123}
124
/*
 * netpoll_tx_running - heuristic for "are we inside a netpoll transmit?"
 * Uses irqs_disabled() as the proxy, matching netpoll_rx() above which
 * runs the whole rx path with interrupts off.
 */
static inline int netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
129
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130#else
John W. Linville969a6e52010-08-10 16:24:41 -0700131static inline bool netpoll_rx(struct sk_buff *skb)
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700132{
133 return 0;
134}
Herbert Xud1c76af2009-03-16 10:50:02 -0700135static inline int netpoll_rx_on(struct sk_buff *skb)
136{
137 return 0;
138}
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700139static inline int netpoll_receive_skb(struct sk_buff *skb)
140{
141 return 0;
142}
143static inline void *netpoll_poll_lock(struct napi_struct *napi)
144{
145 return NULL;
146}
147static inline void netpoll_poll_unlock(void *have)
148{
149}
150static inline void netpoll_netdev_init(struct net_device *dev)
151{
152}
Herbert Xuc18370f2010-06-10 16:12:49 +0000153static inline int netpoll_tx_running(struct net_device *dev)
154{
155 return 0;
156}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157#endif
158
159#endif