/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

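/*
 * Deferred transmit worker: drain the netpoll tx backlog queue.  Each
 * skb is sent with the device's tx queue locked and local interrupts
 * disabled; if the queue is stopped, frozen, or the driver refuses the
 * packet, the skb is requeued and the work is rescheduled for later.
 */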
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

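/*
 * Verify the UDP checksum of an incoming packet.  Returns 0 when the
 * checksum is absent, already validated by hardware, or verifies
 * against the pseudo-header sum; non-zero on failure.
 */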
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

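/*
 * Walk every NAPI context registered on the device and poll each one
 * whose lock we can take, sharing a single receive budget across them.
 */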
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

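/*
 * netpoll_poll - drive the device without interrupts.  Invokes the
 * driver's ->ndo_poll_controller() hook, pumps any pending NAPI work,
 * answers queued ARP requests, and reaps the tx completion queue.
 */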
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177void netpoll_poll(struct netpoll *np)
178{
Stephen Hemminger51069302007-11-19 19:18:11 -0800179 struct net_device *dev = np->dev;
Pavel Emelyanov5e392732009-05-11 00:36:35 +0000180 const struct net_device_ops *ops;
Stephen Hemminger51069302007-11-19 19:18:11 -0800181
Pavel Emelyanov5e392732009-05-11 00:36:35 +0000182 if (!dev || !netif_running(dev))
183 return;
184
185 ops = dev->netdev_ops;
186 if (!ops->ndo_poll_controller)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 return;
188
189 /* Process pending work on NIC */
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800190 ops->ndo_poll_controller(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
Stephen Hemminger51069302007-11-19 19:18:11 -0800192 poll_napi(dev);
193
194 service_arp_queue(dev->npinfo);
Neil Horman068c6e92006-06-26 00:04:27 -0700195
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 zap_completion_queue();
197}
198
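/* Top up the emergency skb pool to MAX_SKBS preallocated buffers. */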
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

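/*
 * Free the skbs sitting on this CPU's tx completion queue.  The softirq
 * that normally reaps them may not get a chance to run while netpoll is
 * driving the device, so we do it by hand; skbs with destructors are
 * handed back to dev_kfree_skb_any() instead of being freed directly.
 */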
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

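/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool under memory pressure.  If both fail, poll the device to flush
 * completions and retry, giving up after ten failed attempts.
 */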
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

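/*
 * Transmit an skb immediately if it is safe to do so: nothing already
 * queued and no NAPI poll running on this CPU.  The transmit is retried
 * for up to one clock tick; anything still unsent is pushed onto the
 * tx backlog queue for queue_process() to deal with.
 */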
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device: maybe there is some cleanup to do */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

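/*
 * Build a complete Ethernet/IPv4/UDP frame around the message by
 * pushing each header in front of the payload in turn, then hand the
 * frame to netpoll_send_skb().  A computed UDP checksum of zero is
 * folded to CSUM_MANGLED_0, since zero means "no checksum" on the wire.
 */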
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

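/*
 * Answer an ARP request aimed at the netpoll-managed IP address.  The
 * normal ARP stack may not get to run while netpoll has trapped the
 * receive path, so we construct and send the ARPOP_REPLY ourselves.
 */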
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != np->local_ip ||
	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);
	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->dev->dev_addr,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

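/*
 * Receive-path hook: decide whether an incoming skb belongs to the
 * netpoll client.  ARP requests are queued for arp_reply(); matching
 * UDP packets are validated, checksummed, and handed to np->rx_hook().
 * Returns 1 if the skb was consumed, 0 to let the normal stack see it.
 * While trapped, every packet is consumed, matching or not.
 */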
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != iph->daddr)
		goto out;
	if (np->remote_ip && np->remote_ip != iph->saddr)
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
			 np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
			 np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface %s\n",
			 np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
			 np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
			 np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
			 np->name, np->remote_mac);
}

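/*
 * Parse a netpoll configuration string of the form
 *
 *	[local-port]@[local-ip]/[dev],[remote-port]@<remote-ip>/[remote-mac]
 *
 * e.g. (a hypothetical example)
 * "6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55".
 * Omitted fields keep their defaults.  Returns 0 on success, -1 on a
 * malformed string.
 */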
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

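/*
 * Bind a netpoll instance to its device: resolve the interface by
 * name, allocate or share the per-device netpoll_info, force the
 * device up if necessary, wait for carrier, and fall back to the
 * first address on the interface when no local IP was configured.
 */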
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

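/*
 * Detach a netpoll instance from its device.  Receive hooks are
 * unregistered immediately; the shared netpoll_info is torn down only
 * when the last user drops its reference.
 */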
void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				__skb_queue_purge(&npinfo->txq);
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

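/*
 * netpoll_trap()/netpoll_set_trap() let a client such as a debugger
 * divert all received packets to itself; while the trap count is
 * non-zero, __netpoll_rx() consumes every incoming frame.
 */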
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);