/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

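/*
 * Deferred transmit worker: retry skbs that could not be sent
 * immediately.  Each skb is transmitted with the tx queue locked and
 * IRQs disabled; if the queue is stopped/frozen or the driver refuses
 * the packet, the skb is put back and the work is rescheduled.
 */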
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

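/*
 * Verify the UDP checksum of a received packet, using the checksum
 * already carried by the skb when possible and falling back to a full
 * software check otherwise.  Returns 0 if the checksum is OK.
 */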
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			netpoll_arp_reply(skb, npi);
	}
}

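/*
 * Poll the device in place of its interrupt handler: kick the
 * driver's ->ndo_poll_controller(), service any scheduled NAPI
 * contexts, hand queued ARP traffic over to the bonding master when
 * this is a slave device, and answer pending ARP requests.
 */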
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->arp_tx, skb);
			}
		}
	}

	service_arp_queue(ni);

	zap_completion_queue();
}

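/*
 * Top up the static skb pool with fully-sized buffers so that a
 * message can still go out when later atomic allocations fail.
 */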
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

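/*
 * Drain this CPU's softnet completion queue: skbs without a
 * destructor are freed directly, the rest are handed back to
 * dev_kfree_skb_any().  The net tx softirq would normally do this,
 * but it may not get a chance to run while we poll the device
 * ourselves.
 */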
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

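/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool and, as a last resort, polling the device a few times to free
 * up memory before giving up.
 */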
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

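/*
 * Return 1 if any NAPI context of this device is currently being
 * polled by the local CPU, in which case transmitting now would
 * recurse into the driver.
 */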
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

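/*
 * Build a UDP/IPv4/Ethernet frame around the supplied payload by hand
 * (netpoll cannot rely on the normal stack transmit paths) and send
 * it via netpoll_send_skb().
 */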
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);
	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

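/*
 * Answer an ARP request received while the normal stack is bypassed:
 * for every netpoll client whose local IP matches the requested
 * address, build an ARP reply by hand and transmit it directly.
 */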
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

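/*
 * Inspect a received frame while netpoll is trapping traffic.  ARP
 * requests are queued for netpoll_arp_reply(); matching UDP packets
 * are fed to the registered rx_hook.  Returns 1 if the skb was
 * consumed here and must not be passed on to the regular stack.
 */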
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

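/*
 * Parse a netconsole-style configuration string of the form
 *   [src-port]@[src-ip]/[device],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 * into @np.  Returns 0 on success, -1 on a malformed string.
 */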
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

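/*
 * Attach @np to @ndev: allocate (or reuse and reference) the device's
 * netpoll_info, register the rx hook if one was supplied, and publish
 * the info pointer via RCU.  netpoll_setup() takes RTNL around this
 * call.
 */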
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->arp_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

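/*
 * Detach @np from its device: unhook it from the rx list and, once
 * the last reference to the device's netpoll_info is dropped, defer
 * the final teardown to an RCU callback.  netpoll_cleanup() calls
 * this under RTNL.
 */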
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);