/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
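/*
 * With MAX_UDP_CHUNK at 1460 bytes, a message plus its 8-byte UDP and
 * 20-byte IP headers comes to 1488 bytes, which fits within a standard
 * 1500-byte Ethernet MTU, so a single console message should never
 * need IP fragmentation.
 */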

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

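/*
 * Deferred transmit path: drain the retry queue from process context.
 * If the device has gone away the skb is dropped; if it is merely busy
 * (queue stopped, or the driver returns something other than
 * NETDEV_TX_OK) the skb goes back on the head of the queue and the
 * work is rescheduled a tenth of a second later, preserving message
 * order.
 */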
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		netif_tx_lock(dev);
		if ((netif_queue_stopped(dev) ||
		     netif_subqueue_stopped(dev, skb->queue_mapping)) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}

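/*
 * Verify the UDP checksum of a received packet.  Returns 0 when the
 * checksum is known good (absent, already verified by hardware, or
 * validated against the pseudo-header here); otherwise falls back to
 * a full software checksum of the packet.
 */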
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
		    napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			npinfo->rx_flags |= NETPOLL_RX_DROP;
			atomic_inc(&trapped);

			napi->poll(napi, budget);

			atomic_dec(&trapped);
			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
			spin_unlock(&napi->poll_lock);
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	if (unlikely(!npi))
		return;

	skb = skb_dequeue(&npi->arp_tx);

	while (skb != NULL) {
		arp_reply(skb);
		skb = skb_dequeue(&npi->arp_tx);
	}
}

void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (!list_empty(&np->dev->napi_list))
		poll_napi(np);

	service_arp_queue(np->dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

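/*
 * Drain this CPU's softnet completion queue by hand.  When netpoll is
 * polling with interrupts disabled, the NET_TX softirq that would
 * normally free these skbs may never get a chance to run.  skbs that
 * carry a destructor are handed to dev_kfree_skb_any() (which requeues
 * them) rather than being freed directly.
 */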
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

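/*
 * Allocate an skb for transmission, falling back on the preallocated
 * pool when atomic allocation fails.  If both fail, poll the device a
 * handful of times in the hope that completed buffers get freed, then
 * give up and return NULL.
 */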
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

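/*
 * Transmit an skb right away when it is safe to do so: nothing already
 * queued (so ordering is preserved) and this CPU does not already own
 * a NAPI poll on the device (so we cannot recurse).  The transmit is
 * retried for up to one clock tick's worth of 50us polls; anything
 * still unsent is deferred to queue_process() above.
 */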
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev) &&
				    !netif_subqueue_stopped(dev, skb->queue_mapping))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device; maybe there is some cleanup to do */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

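/*
 * Build a UDP/IPv4/Ethernet frame around 'msg' by hand and send it.
 * The packet is assembled inside-out: the payload is copied first,
 * then the UDP, IP and Ethernet headers are pushed on in front of it
 * in turn, each checksummed as needed.
 */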
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

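/*
 * Answer an ARP request aimed at the netpoll client's IP address.
 * While netpoll has trapped the receive path the normal ARP code
 * never sees the request, so reply here to keep the peer's ARP cache
 * warm; otherwise console traffic would stop once the entry expired.
 */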
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No ARP on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore this ARP? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */

	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 sha, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the ARP protocol part.
	 *
	 * We only support Ethernet device types,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

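/*
 * Receive-path hook: examine an incoming frame and, if it is a valid
 * UDP/IPv4 packet addressed to this netpoll client, hand the payload
 * to np->rx_hook and consume the skb.  Returns 1 when the packet was
 * consumed (including everything dropped while trapped), 0 to let the
 * normal network stack have it.
 */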
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	DECLARE_MAC_BUF(mac);
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->local_ip));
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));
	printk(KERN_INFO "%s: remote ethernet address %s\n",
	       np->name, print_mac(mac, np->remote_mac));
}

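/*
 * Parse a netpoll configuration string of the form
 *
 *	[local_port]@[local_ip]/[dev_name],[remote_port]@[remote_ip]/[remote_mac]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55" (the
 * addresses here are illustrative only).  Any field may be left empty,
 * keeping its delimiter, to fall back on the existing default; only
 * the remote IP is required.
 */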
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

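/*
 * Attach a netpoll client to its device: look the interface up by
 * name, allocate (or share, refcounted) its netpoll_info, force the
 * interface up and wait for carrier if necessary, and fill in any
 * missing local MAC and IP addresses from the device itself.
 */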
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

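/*
 * Detach a netpoll client from its device.  npinfo is shared between
 * clients of the same device and refcounted, so the queues are purged
 * and the delayed work cancelled only when the last client goes away.
 */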
void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				if (!skb_queue_empty(&npinfo->txq)) {
					struct sk_buff *skb;
					skb = __skb_dequeue(&npinfo->txq);
					kfree_skb(skb);
				}
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);