/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
                napi_schedule(&vif->napi);

        return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        work_done = xenvif_tx_action(vif, budget);

        if (work_done < budget) {
                int more_to_do = 0;
                unsigned long flags;

                /* It is necessary to disable the IRQ before calling
                 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
                 * lose an event from the frontend.
                 *
                 * Consider:
                 *   RING_HAS_UNCONSUMED_REQUESTS
                 *   <frontend generates event to trigger napi_schedule>
                 *   __napi_complete
                 *
                 * This handler is still in the scheduled state, so the
                 * event has no effect at all. After __napi_complete
                 * this handler is descheduled and cannot be scheduled
                 * again. We lose the event in this case and the ring
                 * will be completely stalled.
                 */

                local_irq_save(flags);

                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
                if (!(more_to_do &&
                      xenvif_tx_pending_slots_available(vif)))
                        __napi_complete(napi);

                local_irq_restore(flags);
        }

        return work_done;
}
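
/* Illustrative sketch only, not part of this file: the frontend side of
 * the race described in xenvif_poll() above. A netfront-style sender
 * pushes requests and only raises an event when the backend has asked
 * for one, which is why the backend's final check must re-enable events
 * atomically before completing NAPI. The helper name is hypothetical.
 */
static inline void example_frontend_notify(struct xen_netif_tx_front_ring *tx,
                                           int irq)
{
        int notify;

        /* Publish new requests; notify is set if the backend wants an event */
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(tx, notify);
        if (notify)
                notify_remote_via_irq(irq);
}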

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        xenvif_kick_thread(vif);

        return IRQ_HANDLED;
}

static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

static void xenvif_wake_queue(unsigned long data)
{
        struct xenvif *vif = (struct xenvif *)data;

        if (netif_queue_stopped(vif->dev)) {
                netdev_err(vif->dev, "draining TX queue\n");
                vif->rx_queue_purge = true;
                xenvif_kick_thread(vif);
                netif_wake_queue(vif->dev);
        }
}

static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if vif is not ready */
        if (vif->task == NULL ||
            vif->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At best we'll need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we'll also need an extra slot for the
         * metadata.
         */
        if (skb_is_gso(skb))
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
                vif->wake_queue.function = xenvif_wake_queue;
                vif->wake_queue.data = (unsigned long)vif;
                xenvif_stop_queue(vif);
                mod_timer(&vif->wake_queue,
                          jiffies + rx_drain_timeout_jiffies);
        }

        skb_queue_tail(&vif->rx_queue, skb);
        xenvif_kick_thread(vif);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
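
/* Illustrative sketch only (not used by the driver): the slot estimate
 * from xenvif_start_xmit() above, pulled out as a hypothetical helper.
 * One slot for the header, one per frag, plus one if GSO metadata is
 * needed.
 */
static inline int xenvif_min_slots_for_skb(struct sk_buff *skb)
{
        int slots = 1 + skb_shinfo(skb)->nr_frags;      /* header + frags */

        if (skb_is_gso(skb))
                slots++;        /* extra slot for the GSO metadata */

        return slots;
}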

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        napi_enable(&vif->napi);
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
        xenvif_check_rx_xenvif(vif);
}

static void xenvif_down(struct xenvif *vif)
{
        napi_disable(&vif->napi);
        disable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_start_queue(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_stop_queue(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets that
         * were never freed properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a frontend with the same MAX_SKB_FRAGS.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif, tx_frag_overflow)
        },
};
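
/* Illustrative, hypothetical helper (not part of the driver): the
 * zerocopy counters above should satisfy sent == success + fail once
 * every zerocopy skb has been freed, so a non-zero result here means
 * completions are still outstanding (or skbs were leaked).
 */
static inline unsigned long xenvif_zerocopy_outstanding(struct xenvif *vif)
{
        return vif->tx_zerocopy_sent -
               (vif->tx_zerocopy_success + vif->tx_zerocopy_fail);
}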

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        void *vif = netdev_priv(dev);
        int i;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
        int i;

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
                                     MAX_GRANT_COPY_OPS);
        if (vif->grant_copy_op == NULL) {
                pr_warn("Could not allocate grant copy space for %s\n", name);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;

        vif->credit_bytes = vif->remaining_credit = ~0UL;
        vif->credit_usec = 0UL;
        init_timer(&vif->credit_timeout);
        vif->credit_window_start = get_jiffies_64();

        init_timer(&vif->wake_queue);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                           NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                           NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        skb_queue_head_init(&vif->rx_queue);
        skb_queue_head_init(&vif->tx_queue);

        vif->pending_cons = 0;
        vif->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                vif->pending_ring[i] = i;
        spin_lock_init(&vif->callback_lock);
        spin_lock_init(&vif->response_lock);
        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a set of valid page descriptors, with no dependency on ballooning.
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       vif->mmap_pages,
                                       false);
        if (err) {
                netdev_err(dev, "Could not reserve mmap_pages\n");
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < MAX_PENDING_REQS; i++) {
                vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}
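
/* Illustrative sketch only, under the assumption that the transmit path
 * wires skbs up roughly like this (the helper name is hypothetical):
 * attaching one of the callback_struct entries initialised in
 * xenvif_alloc() to an skb makes the core networking stack invoke
 * xenvif_zerocopy_callback() when the skb is freed.
 */
static inline void example_attach_zerocopy(struct xenvif *vif,
                                           struct sk_buff *skb,
                                           u16 pending_idx)
{
        struct ubuf_info *uarg =
                &vif->pending_tx_info[pending_idx].callback_struct;

        /* Completion callback fires on kfree_skb() of a zerocopy skb */
        skb_shinfo(skb)->destructor_arg = uarg;
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
}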

int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(vif->tx_irq);
        BUG_ON(vif->task);
        BUG_ON(vif->dealloc_task);

        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&vif->wq);
        init_waitqueue_head(&vif->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        vif->dev->name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = vif->rx_irq = err;
                disable_irq(vif->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
                         "%s-tx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        vif->tx_irq_name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = err;
                disable_irq(vif->tx_irq);

                snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
                         "%s-rx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        vif->rx_irq_name, vif);
                if (err < 0)
                        goto err_tx_unbind;
                vif->rx_irq = err;
                disable_irq(vif->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)vif, "%s-guest-rx", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)vif, "%s-dealloc", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->dealloc_task = task;

        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();

        wake_up_process(vif->task);
        wake_up_process(vif->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(vif->rx_irq, vif);
        vif->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(vif->tx_irq, vif);
        vif->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(vif);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        if (vif->task) {
                del_timer_sync(&vif->wake_queue);
                kthread_stop(vif->task);
                vif->task = NULL;
        }

        if (vif->dealloc_task) {
                kthread_stop(vif->dealloc_task);
                vif->dealloc_task = NULL;
        }

        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
                else {
                        unbind_from_irqhandler(vif->tx_irq, vif);
                        unbind_from_irqhandler(vif->rx_irq, vif);
                }
                vif->tx_irq = 0;
        }

        xenvif_unmap_frontend_rings(vif);
}

void xenvif_free(struct xenvif *vif)
{
        int i, unmap_timeout = 0;
        /* Here we want to avoid timeout messages if an skb can be
         * legitimately stuck somewhere else. Realistically this could be
         * another vif's internal or QDisc queue. That other vif also has
         * this rx_drain_timeout_msecs timeout, but the timer only ditches
         * the internal queue. After that, the QDisc queue can, in the worst
         * case, put XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that
         * other vif's internal queue, so we need several rounds of such
         * timeouts until we can be sure that no other vif still holds skbs
         * from us. We are not sending more skbs, so newly stuck packets are
         * not interesting for us here.
         */
        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs / 1000) *
                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH,
                             XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS);
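
        /* A worked example under assumed typical values
         * (rx_drain_timeout_msecs = 10000, XEN_NETIF_RX_RING_SIZE = 256,
         * MAX_SKB_FRAGS = 17, XENVIF_QUEUE_LENGTH = 32):
         *
         *   256 / 17             = 15 skbs drained per round
         *   DIV_ROUND_UP(32, 15) = 3 rounds
         *   (10000 / 1000) * 3   = 30 seconds
         *
         * so with those values we wait up to roughly 30s per still-granted
         * page before complaining below.
         */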

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > worst_case_skb_lifetime &&
                            net_ratelimit())
                                netdev_err(vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        i = -1;
                }
        }

        free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

        netif_napi_del(&vif->napi);

        unregister_netdev(vif->dev);

        vfree(vif->grant_copy_op);
        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}