/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

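/*
 * A 1460-byte chunk plus the 20-byte IP and 8-byte UDP headers fits
 * within a standard 1500-byte Ethernet MTU.
 */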
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

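/*
 * Drain the deferred transmit queue from process context.  If the
 * device is gone, drop the skb; if the device is busy, requeue at the
 * head (so messages stay in order) and retry after a short delay.
 */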
static void queue_process(void *p)
{
	struct netpoll_info *npinfo = p;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		netif_tx_lock_bh(dev);
		if (netif_queue_stopped(dev) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock_bh(dev);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}

		netif_tx_unlock_bh(dev);
	}
}

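/*
 * Verify a UDP checksum against the IP pseudo-header.  Returns 0 if
 * the checksum is valid or was not supplied, non-zero otherwise.
 */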
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			unsigned short ulen, u32 saddr, u32 daddr)
{
	unsigned int psum;

	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !(u16)csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bidirectional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	int budget = 16;

	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
	    npinfo->poll_owner != smp_processor_id() &&
	    spin_trylock(&npinfo->poll_lock)) {
		npinfo->rx_flags |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
		spin_unlock(&npinfo->poll_lock);
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (unlikely(!npi))
		return;

	skb = skb_dequeue(&npi->arp_tx);

	while (skb != NULL) {
		arp_reply(skb);
		skb = skb_dequeue(&npi->arp_tx);
	}
}

void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

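/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool and, failing that, polling the device up to ten times in the
 * hope of reclaiming completed buffers.
 */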
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
	    npinfo->poll_owner != smp_processor_id() &&
	    netif_tx_trylock(dev)) {
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
			if (!netif_queue_stopped(dev))
				status = dev->hard_start_xmit(skb, dev);

			if (status == NETDEV_TX_OK)
				break;

			/* tickle the device: there may be cleanup pending */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		netif_tx_unlock(dev);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_work(&npinfo->tx_work);
	}
}

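/*
 * Construct the packet back to front: copy in the payload, then push
 * the UDP, IP, and Ethernet headers onto the skb in turn.
 */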
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	/* RFC 768: a computed checksum of zero is transmitted as all ones */
	if (udph->check == 0)
		udph->check = -1;

	skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb->mac.raw = skb->data;
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	u32 sip, tip;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1) + skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */

	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 np->remote_mac, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

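/*
 * Parse a netpoll configuration string of the form
 *
 *   [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. 6665@192.168.0.1/eth0,6666@192.168.0.2/00:09:6b:88:c4:de
 *
 * Omitted fields keep whatever defaults the caller set up.
 */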
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int i;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;

		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));

	if (*cur != 0) {
		/* MAC address: five colon-separated bytes, then the last */
		for (i = 0; i < 5; i++) {
			if ((delim = strchr(cur, ':')) == NULL)
				goto parse_failed;
			*delim = 0;
			np->remote_mac[i] = simple_strtol(cur, NULL, 16);
			cur = delim + 1;
		}
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	printk(KERN_INFO "%s: remote ethernet address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       np->name,
	       np->remote_mac[0],
	       np->remote_mac[1],
	       np->remote_mac[2],
	       np->remote_mac[3],
	       np->remote_mac[4],
	       np->remote_mac[5]);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

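/*
 * Bring up a netpoll client: look up the device, allocate or share its
 * refcounted netpoll_info, force the interface up if necessary, and
 * fill in any local addresses the caller left unset.
 */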
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;
		spin_lock_init(&npinfo->poll_lock);
		npinfo->poll_owner = -1;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			np->dev->npinfo = NULL;
			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);
				flush_scheduled_work();

				kfree(npinfo);
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

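/*
 * While "trapped" is non-zero, netpoll consumes every packet it sees:
 * __netpoll_rx() frees anything it does not deliver to an rx_hook.
 */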
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);