/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
	(sizeof(struct ethhdr) + \
	 sizeof(struct iphdr) + \
	 sizeof(struct udphdr) + \
	 MAX_UDP_CHUNK)
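/*
 * With the usual 14-byte Ethernet, 20-byte IPv4 and 8-byte UDP headers
 * this works out to 1502 bytes of buffer per skb, so a full pool of
 * MAX_SKBS entries reserves roughly 47 KB (plus per-skb overhead).
 */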

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
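/*
 * For example, np_info(np, "local port %d\n", np->local_port) prints
 * "netpoll: netconsole: local port 6665" -- the "netpoll: " prefix comes
 * from pr_fmt() above (assuming KBUILD_MODNAME expands to "netpoll" for
 * this file) and "netconsole" is whatever the client stored in np->name.
 */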

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

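/*
 * Returns zero when the UDP checksum is valid (or absent), non-zero when
 * verification fails; callers treat any non-zero result as a bad packet.
 */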
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

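/*
 * Grab an skb for a netpoll message: try a fresh atomic allocation first,
 * fall back to the pre-allocated pool, and if both fail pump the device
 * with netpoll_poll_dev() and retry up to ten times before giving up.
 */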
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device, maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
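
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * netconsole's console write handler essentially boils down to
 *
 *	netpoll_send_udp(&nt->np, msg, len);
 *
 * where nt->np stands for the client's own struct netpoll, previously
 * configured and registered with netpoll_setup().
 */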

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection to see whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * We only support the Ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

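/*
 * Parse an option string of the form used by netconsole and friends:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:09:6b:aa:bb:cc";
 * any field may be left empty to keep its default (the MAC above is
 * just an illustration).
 */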
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
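
/*
 * Putting the pieces together, a client's init path is roughly (a sketch,
 * with error handling and the client's own bookkeeping omitted):
 *
 *	np->name = "netconsole";
 *	strlcpy(np->dev_name, "eth0", IFNAMSIZ);
 *	netpoll_parse_options(np, config);
 *	err = netpoll_setup(np);
 *	...
 *	netpoll_send_udp(np, msg, len);
 *	...
 *	netpoll_cleanup(np);
 */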

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean up after the last, unfinished piece of work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
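
/*
 * Sketch of how a debugger-style client might use the trap counter
 * (hypothetical caller, in the style of kgdb-over-ethernet):
 *
 *	netpoll_set_trap(1);	// incoming packets now go to rx_hook/arp_tx
 *	... poll the device and service the debugger ...
 *	netpoll_set_trap(0);	// restore normal receive behaviour
 *
 * While trapped, __netpoll_rx() above consumes every packet instead of
 * handing unmatched traffic back to the regular network stack.
 */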