/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003 Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE					\
	(sizeof(struct ethhdr) +			\
	 sizeof(struct iphdr) +				\
	 sizeof(struct udphdr) +			\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

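/*
 * Work handler that retransmits skbs which could not be sent directly.
 * If the queue is still frozen or stopped, the remaining skbs are
 * requeued and the work is rescheduled for a later tick.
 */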
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}

static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}

static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 16;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ni->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	atomic_dec(&trapped);
	ni->rx_flags &= ~NETPOLL_RX_DROP;

	up(&ni->dev_lock);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}

void netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_rx_disable);

void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);

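/*
 * Top up the emergency skb pool so that a message can still get out
 * even when regular allocations fail (see the comment at the top of
 * this file).
 */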
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

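/*
 * Free skbs sitting on this CPU's softnet completion queue: skbs with
 * a destructor are handed back to dev_kfree_skb_any(), the rest are
 * freed directly.
 */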
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

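/*
 * Allocate an skb for netpoll transmission, falling back to the
 * pre-allocated pool and retrying (while polling the device) a few
 * times under memory pressure.
 */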
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !vlan_hw_offload_capable(netif_skb_features(skb),
								     skb->vlan_proto)) {
						skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
						if (unlikely(!skb)) {
							/* This is actually a packet drop, but we
							 * don't want the code at the end of this
							 * function to try and re-queue a NULL skb.
							 */
							status = NETDEV_TX_OK;
							goto unlock_txq;
						}
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
			unlock_txq:
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work,0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

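/*
 * Build an Ethernet + IPv4/IPv6 + UDP frame around the given payload
 * and push it out through netpoll_send_skb().
 */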
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

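/*
 * Answer ARP requests (IPv4) and neighbour solicitations (IPv6) that
 * are addressed to a netpoll client, so the remote side can keep
 * resolving our hardware address while normal rx processing is trapped.
 */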
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_ARP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if( proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
			*(__be32*)hdr = htonl(0x60000000);
			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;

			target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address, we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}

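/*
 * Check whether the skb looks like an ICMPv6 neighbour solicitation,
 * so that __netpoll_rx() can queue it for a deferred neighbour reply.
 */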
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	if (skb->protocol != htons(ETH_P_ARP))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}

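/*
 * Hand a received packet to the registered rx_skb_hooks.  Returns 1 if
 * netpoll consumed (or trapped) the packet, 0 if it should continue up
 * the normal receive path.
 */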
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen, data_len;
	int hits = 0, offset;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;
	uint16_t source;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now We trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

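/*
 * Parse a textual IP address into *addr.  Returns 0 for IPv4, 1 for
 * IPv6, or -1 if the string could not be parsed (or IPv6 support is
 * not built in).
 */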
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

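/*
 * Parse a netpoll configuration string of the form
 * [local_port]@[local_ip]/[dev_name],[remote_port]@<remote_ip>/[remote_mac]
 * where omitted fields keep their current values.  Returns 0 on
 * success, -1 on a parse error.
 */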
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_skb_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);