/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

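/* Worker for the deferred-transmit path. netpoll_send_skb_on_dev() queues
 * skbs on npinfo->txq whenever it cannot send immediately; this work item
 * drains that queue, re-validating skb->queue_mapping in case the device
 * has since shrunk its tx queue count, and reschedules itself (after HZ/10)
 * if the device is still not ready to take a frame.
 */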
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static void poll_one_napi(struct napi_struct *napi)
{
	int work = 0;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			poll_one_napi(napi);
			spin_unlock(&napi->poll_lock);
		}
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}

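/* netpoll_poll_disable() and netpoll_poll_enable() bracket device state
 * changes: the dev_open/dev_close paths take dev_lock here so that
 * netpoll_poll_dev() (which only trylocks it) stays out of the hardware
 * while the device is being reconfigured. The lookup of npinfo is done
 * under SRCU because the disable side may sleep on the semaphore.
 */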
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

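/* With IRQs disabled, dev_kfree_skb_any() parks frees on the per-cpu
 * softnet completion queue, which never drains while netpoll is
 * busy-polling. Reap it here: skbs that are safe to free from hard-IRQ
 * context are freed directly, anything else is handed back via
 * dev_kfree_skb_any() after bumping its refcount.
 */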
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device: maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

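/* Sketch of how a client such as netconsole consumes this API (illustrative
 * only; the variable names below are hypothetical):
 *
 *	static struct netpoll np;
 *
 *	netpoll_parse_options(&np, "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55");
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, msg, strlen(msg));
 *
 * netpoll_send_udp() expects to be called with IRQs disabled; it builds the
 * UDP, IP(v6), and Ethernet headers by hand around the message and hands the
 * finished frame to netpoll_send_skb().
 */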
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

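/* netpoll_parse_options() accepts a netconsole-style config string:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-mac]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55" (example values
 * only). A field may be left empty before its delimiter, in which case the
 * existing value in *np is kept. The IP version is inferred from the
 * addresses and must agree between the local and remote side.
 */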
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);
		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

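/* Detach netpoll state from np->dev. When the last reference goes away,
 * npinfo is freed via RCU (see rcu_cleanup_netpoll_info() above) so that
 * concurrent rcu_dereference_bh() users on the send path never see freed
 * memory; synchronize_srcu() likewise waits out any netpoll_poll_disable()
 * read-side sections before the teardown proceeds.
 */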
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);