/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))
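/*
 * MAX_SKB_SIZE is the worst case for one pooled message frame: a full
 * MAX_UDP_CHUNK payload plus its UDP, IPv4 and Ethernet headers.
 */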

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
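/*
 * carrier_timeout is a writable module parameter (in seconds);
 * netpoll_setup() below waits up to this long for link carrier after it
 * has forced the device up.
 */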

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->priv_flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll(struct netpoll *np)
{
	netpoll_poll_dev(np->dev);
}
EXPORT_SYMBOL(netpoll_poll);

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_tx_queue_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
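/*
 * netpoll_send_skb(), used by netpoll_send_udp() and arp_reply() below, is
 * expected to be a thin inline wrapper (in include/linux/netpoll.h, not in
 * this file) that simply supplies np->dev as the transmit device.  A hedged
 * sketch of such a wrapper, for illustration only:
 *
 *	static inline void netpoll_send_skb(struct netpoll *np,
 *					    struct sk_buff *skb)
 *	{
 *		netpoll_send_skb_on_dev(np, skb, np->dev);
 *	}
 */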

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
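/*
 * Typical use by a netpoll client such as netconsole: once netpoll_setup()
 * has succeeded, each message is pushed out in chunks of at most
 * MAX_UDP_CHUNK bytes.  The loop below is an illustrative sketch, not code
 * lifted from any client:
 *
 *	static void example_write(struct netpoll *np, const char *msg, int len)
 *	{
 *		int frag;
 *
 *		while (len > 0) {
 *			frag = min(len, MAX_UDP_CHUNK);
 *			netpoll_send_udp(np, msg, frag);
 *			msg += frag;
 *			len -= frag;
 *		}
 *	}
 */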

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
				    LL_RESERVED_SPACE(np->dev));
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %pI4\n",
	       np->name, &np->local_ip);
	printk(KERN_INFO "%s: interface '%s'\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %pI4\n",
	       np->name, &np->remote_ip);
	printk(KERN_INFO "%s: remote ethernet address %pM\n",
	       np->name, np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			printk(KERN_INFO "%s: warning: whitespace "
					"is not allowed\n", np->name);
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
	       np->name, cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
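/*
 * The option string parsed above uses the usual netpoll/netconsole syntax;
 * every field except the remote IP may be left empty and defaulted by the
 * caller:
 *
 *	[src-port]@[src-ip]/[dev],[dst-port]@<dst-ip>/[dst-mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55"
 * (addresses chosen purely for illustration).
 */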

int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
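/*
 * __netpoll_setup() assumes np->dev has already been set and referenced by
 * the caller; netpoll_setup() below invokes it under rtnl_lock(), and other
 * callers are presumably expected to hold rtnl as well, since the device's
 * ndo_netpoll_setup() callback may be invoked here.
 */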

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
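/*
 * A minimal client of this API, sketched for illustration only (example_np,
 * example_init and the addresses are made-up names, not part of this file):
 *
 *	static struct netpoll example_np = {
 *		.name		= "example",
 *		.dev_name	= "eth0",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		char opt[] = "@/eth0,6666@192.168.0.2/00:11:22:33:44:55";
 *
 *		if (netpoll_parse_options(&example_np, opt))
 *			return -EINVAL;
 *		return netpoll_setup(&example_np);
 *	}
 */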

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
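/*
 * While the trapped count is non-zero (raised explicitly via
 * netpoll_set_trap() or implicitly around napi->poll() in poll_one_napi()),
 * __netpoll_rx() consumes every packet it looks at instead of handing
 * unclaimed ones back to the stack, which lets a netpoll-based debugger own
 * the interface exclusively while it is active.
 */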