/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(sizeof(struct ethhdr) + \
	 sizeof(struct iphdr) + \
	 sizeof(struct udphdr) + \
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...) \
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...) \
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...) \
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

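/*
 * Deferred transmit worker: drain npinfo->txq, dropping packets whose
 * device has gone away. If the tx queue is frozen/stopped or the
 * driver refuses the packet, requeue it and try again in HZ/10.
 */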
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

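/*
 * Validate the UDP checksum of a received packet. Returns 0 when the
 * checksum is correct or not required, non-zero when it is bad.
 */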
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where communication is bi-directional, reading only one
 * message at a time can lead to packets being dropped by the network
 * adapter, forcing superfluous retries and possibly timeouts. Thus,
 * we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	WARN_ON_ONCE(!irqs_disabled());

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		local_irq_enable();
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			rcu_read_lock_bh();
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			rcu_read_unlock_bh();
			spin_unlock(&napi->poll_lock);

			if (!budget) {
				local_irq_disable();
				break;
			}
		}
		local_irq_disable();
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			netpoll_arp_reply(skb, npi);
	}
}

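/*
 * Pump the device with interrupts disabled: kick the driver's
 * ->ndo_poll_controller(), service NAPI contexts, migrate queued ARP
 * traffic to the bonding master where applicable, answer pending ARP
 * requests, and recycle completed skbs.
 */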
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->arp_tx, skb);
			}
		}
	}

	service_arp_queue(ni);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

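/*
 * Free the skbs parked on this CPU's completion queue; the softirq
 * that would normally free them may never run while we are polling.
 * Skbs with a destructor are handed back via dev_kfree_skb_any().
 */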
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

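/*
 * Get an skb for transmission: try a fresh atomic allocation first,
 * then fall back to the preallocated pool, and as a last resort poll
 * the device up to ten times in the hope that tx completions free
 * some memory.
 */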
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device, maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

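/*
 * Minimal usage sketch for the send path (illustrative only --
 * "mymodule" and the config string below are hypothetical; see
 * drivers/net/netconsole.c for a real client):
 *
 *	static struct netpoll np = { .name = "mymodule" };
 *
 *	if (!netpoll_parse_options(&np,
 *			"6665@/eth0,6666@10.0.0.2/ff:ff:ff:ff:ff:ff") &&
 *	    !netpoll_setup(&np))
 *		netpoll_send_udp(&np, buf, strlen(buf));
 */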
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;

	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);
	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = htons(atomic_inc_return(&ip_ident));
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

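/*
 * Answer an ARP request on behalf of every netpoll instance bound to
 * the receiving device. Runs from the polling path, so the reply skb
 * is taken via find_skb() rather than a plain allocation.
 */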
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, do some early inspection to see
	   whether it is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about the dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   sending a single reply is fine */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

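/*
 * Receive-path hook. Returns 1 when netpoll consumed the skb (either
 * a trapped packet or a UDP frame delivered to a matching rx_hook),
 * and 0 when the normal network stack should process it.
 */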
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

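/*
 * Parse a netpoll configuration string of the form
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 * omitted fields keep their current values. Returns 0 on success.
 */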
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

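/*
 * Attach @np to @ndev, allocating and publishing the shared
 * netpoll_info on first use; callers are expected to hold RTNL, as
 * netpoll_setup() below does before calling in.
 */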
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->arp_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);