/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
		sizeof(struct iphdr) + sizeof(struct ethhdr))
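
/*
 * A quick size check (editorial note): MAX_UDP_CHUNK (1460) + 8 bytes of
 * UDP header + 20 bytes of IP header = 1488 bytes, which fits in a
 * standard 1500-byte Ethernet MTU with room to spare; MAX_SKB_SIZE adds
 * the Ethernet header on top, so a single pool skb can always carry one
 * full message chunk.
 */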

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

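/*
 * Deferred transmit path: packets that netpoll_send_skb() could not push
 * out directly are queued on npinfo->txq and retried here from process
 * context. If the device is still busy, the packet is put back at the
 * head of the queue and the work is rescheduled roughly HZ/10 later.
 */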
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

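/*
 * Entry point used by netpoll clients (e.g. netconsole) to pump the
 * device when interrupts may be disabled: invoke the driver's
 * ->ndo_poll_controller() hook, run any schedulable NAPI contexts,
 * answer queued ARP requests, and reap completed tx skbs.
 */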
void netpoll_poll(struct netpoll *np)
{
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

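/*
 * netpoll may run with interrupts disabled, so the NET_TX_SOFTIRQ that
 * normally frees skbs on this CPU's completion queue may never fire;
 * drain the queue by hand instead. Skbs with a destructor are handed
 * back to dev_kfree_skb_any() (their destructor must run), the rest
 * are freed directly.
 */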
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

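/*
 * Allocate an skb for transmission: try a fresh atomic allocation first,
 * fall back to the preallocated pool, and as a last resort poll the
 * device up to 10 times in the hope that completions free some memory.
 */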
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq))
					status = ops->ndo_start_xmit(skb, dev);
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device: maybe there is some cleanup pending */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

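/*
 * Build a UDP/IPv4/Ethernet frame by hand and pass it to
 * netpoll_send_skb(). The payload is copied first and the headers are
 * then pushed on in reverse order (UDP, then IP, then Ethernet), which
 * is why find_skb() is asked to reserve total_len - len bytes of
 * headroom.
 */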
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

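/*
 * Minimal ARP responder: parse an ARPOP_REQUEST for our local IP and
 * build the ARPOP_REPLY by hand, so the box stays reachable even while
 * the normal ARP stack is not being serviced (e.g. with traffic
 * trapped by a debugger).
 */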
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No ARP on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore this ARP? */
	if (tip != htonl(np->local_ip) ||
	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);
	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->dev->dev_addr,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the ARP protocol part.
	 *
	 * We only support the Ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

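/*
 * Receive hook, reached through the netpoll_rx() wrapper on the core
 * receive path. Returns 1 when the packet was consumed (a matching UDP
 * packet delivered to np->rx_hook, an ARP request queued for
 * arp_reply(), or any packet swallowed while traffic is trapped) and 0
 * to let normal stack processing continue.
 */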
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->local_ip));
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}

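/*
 * Parse a netconsole-style configuration string of the form
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. (hypothetical values):
 *
 *	6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * Fields left empty keep whatever defaults the caller put in *np.
 */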
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

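/*
 * Bind a netpoll client to its device. A sketch of the typical client
 * lifecycle (identifiers hypothetical, error handling elided):
 *
 *	static struct netpoll np = {
 *		.name     = "myclient",
 *		.dev_name = "eth0",
 *	};
 *
 *	netpoll_parse_options(&np, config_string);
 *	netpoll_setup(&np);	(grabs the device, fills in defaults)
 *	netpoll_send_udp(&np, buf, buf_len);
 *	netpoll_cleanup(&np);	(drops the device reference)
 */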
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				__skb_queue_purge(&npinfo->txq);
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

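/*
 * The global "trapped" counter: while it is non-zero (see
 * netpoll_set_trap() below), __netpoll_rx() consumes every incoming
 * packet instead of handing it to the stack, letting a debugger own
 * the NIC outright.
 */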
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);