/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

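/* TX event channel interrupt: the frontend has produced requests on the
 * TX ring, so hand the work to NAPI.
 */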
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
		napi_schedule(&vif->napi);

	return IRQ_HANDLED;
}

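/* NAPI poll handler: process up to @budget TX requests, and complete
 * NAPI only after a final re-check of the ring (see the race described
 * in the comment below).
 */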
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done;

	work_done = xenvif_tx_action(vif, budget);

	if (work_done < budget) {
		int more_to_do = 0;
		unsigned long flags;

		/* It is necessary to disable IRQs before calling
		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
		 * lose an event from the frontend.
		 *
		 * Consider:
		 *   RING_HAS_UNCONSUMED_REQUESTS
		 *   <frontend generates event to trigger napi_schedule>
		 *   __napi_complete
		 *
		 * This handler is still in the scheduled state, so the
		 * event has no effect at all. After __napi_complete
		 * the handler is descheduled and cannot get
		 * scheduled again. We lose the event in this case and
		 * the ring will be completely stalled.
		 */

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	return work_done;
}

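/* RX event channel interrupt: the frontend has made RX ring space
 * available, so kick the RX kthread.
 */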
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	xenvif_kick_thread(vif);

	return IRQ_HANDLED;
}

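/* Shared handler for the single event channel case
 * (feature-split-event-channels == 0): treat the event as both TX and RX.
 */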
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

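/* Timer callback armed in xenvif_start_xmit() when the RX ring cannot
 * take another skb. If the queue is still stopped when the timer fires,
 * have the kthread purge the internal rx_queue and restart the netdev
 * queue, so a stalled frontend cannot wedge the backend.
 */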
static void xenvif_wake_queue(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;

	if (netif_queue_stopped(vif->dev)) {
		netdev_err(vif->dev, "draining TX queue\n");
		vif->rx_queue_purge = true;
		xenvif_kick_thread(vif);
		netif_wake_queue(vif->dev);
	}
}

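/* ndo_start_xmit: packets sent by the backend towards the guest travel
 * over the vif's RX ring. Estimate the slots the skb needs (header,
 * frags, GSO metadata), stop the queue if they may not be available,
 * then queue the skb for the RX kthread.
 */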
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if vif is not ready */
	if (vif->task == NULL ||
	    vif->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_is_gso(skb))
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
		vif->wake_queue.function = xenvif_wake_queue;
		vif->wake_queue.data = (unsigned long)vif;
		xenvif_stop_queue(vif);
		mod_timer(&vif->wake_queue,
			  jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	return &vif->dev->stats;
}

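/* Enable/disable the datapath: NAPI for the TX ring, the (possibly
 * shared) event channel IRQs, and the TX credit timer. Called under the
 * RTNL from xenvif_open/close and carrier transitions.
 */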
static void xenvif_up(struct xenvif *vif)
{
	napi_enable(&vif->napi);
	enable_irq(vif->tx_irq);
	if (vif->tx_irq != vif->rx_irq)
		enable_irq(vif->rx_irq);
	xenvif_check_rx_xenvif(vif);
}

static void xenvif_down(struct xenvif *vif)
{
	napi_disable(&vif->napi);
	disable_irq(vif->tx_irq);
	if (vif->tx_irq != vif->rx_irq)
		disable_irq(vif->rx_irq);
	del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_up(vif);
	netif_start_queue(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_down(vif);
	netif_stop_queue(dev);
	return 0;
}

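/* With SG the frontend can accept up to 64KiB-1 of data per packet, so
 * allow an MTU of up to 65535 - VLAN_ETH_HLEN (18) bytes; without SG,
 * stay at the standard Ethernet payload size.
 */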
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

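/* Mask out features the frontend has not negotiated, so the stack never
 * builds an skb the other end cannot handle.
 */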
static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets that
	 * were never freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *vif = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

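/* Allocate and register the net device backing one frontend vif,
 * together with the grant-copy scratch space, pending-request state,
 * ballooned mmap pages and zerocopy callbacks used by the TX path.
 * Returns the new vif or an ERR_PTR() on failure.
 */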
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};
	int i;

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	if (vif->grant_copy_op == NULL) {
		pr_warn("Could not allocate grant copy space for %s\n", name);
		free_netdev(dev);
		return ERR_PTR(-ENOMEM);
	}

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;

	vif->credit_bytes = vif->remaining_credit = ~0UL;
	vif->credit_usec = 0UL;
	init_timer(&vif->credit_timeout);
	vif->credit_window_start = get_jiffies_64();

	init_timer(&vif->wake_queue);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	skb_queue_head_init(&vif->rx_queue);
	skb_queue_head_init(&vif->tx_queue);

	vif->pending_cons = 0;
	vif->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; i++)
		vif->pending_ring[i] = i;
	spin_lock_init(&vif->callback_lock);
	spin_lock_init(&vif->response_lock);
	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just
	 * a bunch of valid page descriptors, without dependency on
	 * ballooning.
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       vif->mmap_pages,
				       false);
	if (err) {
		netdev_err(dev, "Could not reserve mmap_pages\n");
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < MAX_PENDING_REQS; i++) {
		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

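/* Second stage of setup, run once the frontend has published its ring
 * references and event channel(s): map the shared rings, bind the
 * (optionally split) interdomain IRQs, start the guest-RX and dealloc
 * kthreads, and bring the carrier up.
 */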
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(vif->tx_irq);
	BUG_ON(vif->task);
	BUG_ON(vif->dealloc_task);

	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&vif->wq);
	init_waitqueue_head(&vif->dealloc_wq);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, tx_evtchn, xenvif_interrupt, 0,
			vif->dev->name, vif);
		if (err < 0)
			goto err_unmap;
		vif->tx_irq = vif->rx_irq = err;
		disable_irq(vif->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
			 "%s-tx", vif->dev->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			vif->tx_irq_name, vif);
		if (err < 0)
			goto err_unmap;
		vif->tx_irq = err;
		disable_irq(vif->tx_irq);

		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
			 "%s-rx", vif->dev->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			vif->rx_irq_name, vif);
		if (err < 0)
			goto err_tx_unbind;
		vif->rx_irq = err;
		disable_irq(vif->rx_irq);
	}

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)vif, "%s-guest-rx", vif->dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}

	vif->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)vif, "%s-dealloc", vif->dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}

	vif->dealloc_task = task;

	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();

	wake_up_process(vif->task);
	wake_up_process(vif->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(vif->rx_irq, vif);
	vif->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(vif->tx_irq, vif);
	vif->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(vif);
err:
	module_put(THIS_MODULE);
	return err;
}

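/* Take the carrier down under the RTNL: queued packets are discarded
 * and, if the device is running, the datapath is quiesced via
 * xenvif_down().
 */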
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	netif_carrier_off(dev); /* discard queued packets */
	if (netif_running(dev))
		xenvif_down(vif);
	rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
	if (netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);

	if (vif->task) {
		del_timer_sync(&vif->wake_queue);
		kthread_stop(vif->task);
		vif->task = NULL;
	}

	if (vif->dealloc_task) {
		kthread_stop(vif->dealloc_task);
		vif->dealloc_task = NULL;
	}

	if (vif->tx_irq) {
		if (vif->tx_irq == vif->rx_irq)
			unbind_from_irqhandler(vif->tx_irq, vif);
		else {
			unbind_from_irqhandler(vif->tx_irq, vif);
			unbind_from_irqhandler(vif->rx_irq, vif);
		}
		vif->tx_irq = 0;
	}

	xenvif_unmap_frontend_rings(vif);
}

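/* Final teardown: every grant-mapped TX page must come back through the
 * zerocopy callbacks before the ballooned pages can be freed and the
 * netdev unregistered. The wait below is bounded by
 * worst_case_skb_lifetime; as an illustrative example (these values are
 * assumptions, not mandated by this file): with
 * rx_drain_timeout_msecs = 10000, XENVIF_QUEUE_LENGTH = 32 and
 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS = 256 / 17 = 15, that is
 * 10 * DIV_ROUND_UP(32, 15) = 30 seconds.
 */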
void xenvif_free(struct xenvif *vif)
{
	int i, unmap_timeout = 0;
	/* Here we want to avoid timeout messages if an skb can be
	 * legitimately stuck somewhere else. Realistically this could be
	 * another vif's internal queue or QDisc queue. That other vif also
	 * has this rx_drain_timeout_msecs timeout, but the timer only
	 * ditches the internal queue. After that, the QDisc queue can put
	 * in the worst case XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs
	 * into that other vif's internal queue, so we need several rounds
	 * of such timeouts until we can be sure that no other vif should
	 * have skbs from us. We are not sending more skbs, so newly stuck
	 * packets are not interesting for us here.
	 */
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > worst_case_skb_lifetime &&
			    net_ratelimit())
				netdev_err(vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
			i = -1;
		}
	}

	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

	netif_napi_del(&vif->napi);

	unregister_netdev(vif->dev);

	vfree(vif->grant_copy_op);
	free_netdev(vif->dev);

	module_put(THIS_MODULE);
}