/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

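/*
 * Drain the deferred transmit queue from process context. Packets that
 * netpoll_send_skb() could not push out directly sit on npinfo->txq;
 * retry them here and reschedule the work if the device queue is still
 * stopped or frozen.
 */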
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

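/*
 * Validate the UDP checksum of a received netpoll frame. Returns 0 when
 * the checksum is absent, already verified, or checks out; non-zero on
 * a genuine mismatch.
 */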
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * Where communication is bidirectional, reading only one message at a
 * time can lead to packets being dropped by the network adapter,
 * forcing superfluous retries and possibly timeouts. Thus, we set our
 * budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

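/*
 * Run every NAPI context registered on the device, sharing one fixed
 * budget between them. A context is skipped when it is owned by this
 * CPU or its poll lock cannot be taken, which avoids recursion.
 */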
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

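/*
 * Pump the device without relying on interrupts: have the driver's
 * ->ndo_poll_controller() emulate its interrupt handler, service NAPI
 * and any queued ARP requests, then reap this CPU's completion queue.
 */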
void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

void netpoll_poll(struct netpoll *np)
{
	netpoll_poll_dev(np->dev);
}

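/* Top up the static skb_pool until it holds MAX_SKBS full-size skbs. */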
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

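/*
 * Free the skbs parked on this CPU's completion queue. With interrupts
 * off the NET_TX softirq cannot run, so netpoll reclaims the memory
 * itself; skbs carrying destructors are released via dev_kfree_skb_any()
 * rather than being freed directly.
 */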
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

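/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool under memory pressure. As a last resort, poll the device a few
 * times to reclaim completed buffers before giving up.
 */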
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

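/*
 * Return non-zero when one of the device's NAPI contexts is currently
 * being polled by this CPU; transmitting inline in that case would
 * recurse into the driver.
 */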
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

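/*
 * Transmit an skb right away when it is safe to do so, spinning with
 * IRQs disabled for up to one clock tick; otherwise park the packet on
 * npinfo->txq for queue_process() to retry from process context.
 */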
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					dev->priv_flags |= IFF_IN_NETPOLL;
					status = ops->ndo_start_xmit(skb, dev);
					dev->priv_flags &= ~IFF_IN_NETPOLL;
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}

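/*
 * Build a complete Ethernet/IPv4/UDP frame around @msg by hand and pass
 * it to netpoll_send_skb(). No route or neighbour lookup happens here;
 * addresses and the destination MAC come straight from the netpoll
 * configuration.
 */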
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

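/*
 * Answer an ARP request aimed at one of our netpoll addresses. Replies
 * are built and transmitted directly, since the regular ARP machinery
 * may not get to run while traffic is trapped by netpoll.
 */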
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

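/*
 * Hook called from the receive path so netpoll can inspect an incoming
 * frame. Returns 1 when the packet was consumed (it matched a netpoll
 * client's UDP tuple, was an ARP request queued for later reply, or
 * arrived while traffic is trapped), 0 when the regular stack should
 * process it.
 */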
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

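/* Log the active netpoll configuration, one field per printk. */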
void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}

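/*
 * Parse a netconsole-style configuration string of the form
 *   [src-port]@[src-ip]/[device],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 * where bracketed fields may be left empty to keep their defaults.
 */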
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
			       "is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}

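/*
 * Attach a netpoll structure to its device: look the device up by name,
 * allocate or share the per-device netpoll_info, bring the interface up
 * if necessary, wait for carrier, and borrow a local IP address from
 * the device when none was configured.
 */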
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	struct netpoll *npe, *tmp;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto put;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

release:
	if (!ndev->npinfo) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
			npe->dev = NULL;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);

		kfree(npinfo);
	}
put:
	dev_put(ndev);
	return err;
}

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

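/*
 * Detach a netpoll structure from its device: drop it from the rx list
 * and, when the last user of the device's netpoll_info goes away, purge
 * the queues and free the shared state.
 */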
void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (!list_empty(&npinfo->rx_np)) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				list_del(&np->rx);
				if (list_empty(&npinfo->rx_np))
					npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				const struct net_device_ops *ops;
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				__skb_queue_purge(&npinfo->txq);
				kfree(npinfo);
				ops = np->dev->netdev_ops;
				if (ops->ndo_netpoll_cleanup)
					ops->ndo_netpoll_cleanup(np->dev);
				else
					np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

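/*
 * netpoll_trap()/netpoll_set_trap() gate whether netpoll swallows all
 * received traffic; while the trap count is non-zero, __netpoll_rx()
 * consumes every packet instead of handing it to the stack.
 */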
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll_dev);
EXPORT_SYMBOL(netpoll_poll);