/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as to
 * increase the inflight counter. The counter must be increased
 * because the core driver calls into xenvif_zerocopy_callback,
 * which in turn calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

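/* Counterpart to xenvif_skb_zerocopy_prepare(): drops the inflight
 * reference taken there once the zerocopy callback has run.
 */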
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}

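/* A vif is schedulable only while its device is up, it is connected
 * to a frontend, and it has not been disabled (e.g. as a rogue vif).
 */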
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

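/* Tx interrupt: the frontend has produced requests on the Tx ring;
 * let NAPI consume them.
 */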
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

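/* NAPI poll handler: process up to @budget Tx requests. Completing
 * below budget re-arms ring events via
 * xenvif_napi_schedule_or_enable_events().
 */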
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue; pretend there is nothing to do for it so
         * that it is descheduled from NAPI. The interface itself will
         * be turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

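/* Rx interrupt: the frontend has posted Rx buffers; kick the queue's
 * guest-rx kthread to fill them.
 */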
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

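/* Shared interrupt handler, used when the frontend does not support
 * split event channels (tx_evtchn == rx_evtchn).
 */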
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

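/* Per-queue helpers mapping a xenvif queue to its netdev Tx queue. */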
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

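/* Transmit a packet from the local network stack towards the guest:
 * choose the target queue from the skb's queue mapping, stamp an
 * expiry time so a stalled frontend cannot hold the skb forever, and
 * pass the packet to the queue's kthread.
 */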
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

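/* Fold the per-queue counters into the aggregate netdev statistics. */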
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

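/* Enable NAPI and interrupts on every queue, then pick up any work
 * that arrived while the interface was down.
 */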
static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

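/* Clear any offload features that the frontend has not negotiated, so
 * the stack never hands us packets the guest cannot accept.
 */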
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. Ideally the
         * guest should use the same MAX_SKB_FRAGS value as the backend.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

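/* Allocate and register the netdev for a new vif. No queues are
 * created here; they are allocated later, once the frontend reports
 * how many it wants to use.
 */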
392struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
393 unsigned int handle)
394{
395 int err;
396 struct net_device *dev;
397 struct xenvif *vif;
398 char name[IFNAMSIZ] = {};
399
400 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100401 /* Allocate a netdev with the max. supported number of queues.
402 * When the guest selects the desired number, it will be updated
Wei Liuf7b50c42014-06-23 10:50:17 +0100403 * via netif_set_real_num_*_queues().
Andrew J. Bennieston8d3d53b2014-06-04 10:30:43 +0100404 */
Tom Gundersenc835a672014-07-14 16:37:24 +0200405 dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
406 ether_setup, xenvif_max_queues);
Ian Campbellf942dc22011-03-15 00:06:18 +0000407 if (dev == NULL) {
Wei Liub3f980b2013-08-26 12:59:38 +0100408 pr_warn("Could not allocate netdev for %s\n", name);
Ian Campbellf942dc22011-03-15 00:06:18 +0000409 return ERR_PTR(-ENOMEM);
410 }
411
412 SET_NETDEV_DEV(dev, parent);
413
414 vif = netdev_priv(dev);
Paul Durrantac3d5ac2013-12-23 09:27:17 +0000415
Ian Campbellf942dc22011-03-15 00:06:18 +0000416 vif->domid = domid;
417 vif->handle = handle;
Ian Campbellf942dc22011-03-15 00:06:18 +0000418 vif->can_sg = 1;
Paul Durrant146c8a72013-10-16 17:50:28 +0100419 vif->ip_csum = 1;
Ian Campbellf942dc22011-03-15 00:06:18 +0000420 vif->dev = dev;
Wei Liue9d8b2c2014-04-01 12:46:12 +0100421 vif->disabled = false;
David Vrabel26c0e102014-12-18 11:13:06 +0000422 vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
423 vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
Wei Liue9d8b2c2014-04-01 12:46:12 +0100424
Wei Liuf7b50c42014-06-23 10:50:17 +0100425 /* Start out with no queues. */
Wei Liue9ce7cb2014-06-04 10:30:42 +0100426 vif->queues = NULL;
Wei Liuf7b50c42014-06-23 10:50:17 +0100427 vif->num_queues = 0;
Zoltan Kiss09350782014-03-06 21:48:30 +0000428
David Vrabelecf08d22014-10-22 14:08:55 +0100429 spin_lock_init(&vif->lock);
430
Ian Campbellf942dc22011-03-15 00:06:18 +0000431 dev->netdev_ops = &xenvif_netdev_ops;
Paul Durrant146c8a72013-10-16 17:50:28 +0100432 dev->hw_features = NETIF_F_SG |
433 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
Paul Durrant82cada22013-10-16 17:50:32 +0100434 NETIF_F_TSO | NETIF_F_TSO6;
Paul Durrant7365bcf2013-10-16 17:50:30 +0100435 dev->features = dev->hw_features | NETIF_F_RXCSUM;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +0000436 dev->ethtool_ops = &xenvif_ethtool_ops;
Ian Campbellf942dc22011-03-15 00:06:18 +0000437
438 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
439
440 /*
441 * Initialise a dummy MAC address. We choose the numerically
442 * largest non-broadcast address to prevent the address getting
443 * stolen by an Ethernet bridge for STP purposes.
444 * (FE:FF:FF:FF:FF:FF)
445 */
Joe Perches3b6ed262015-03-02 19:54:51 -0800446 eth_broadcast_addr(dev->dev_addr);
Ian Campbellf942dc22011-03-15 00:06:18 +0000447 dev->dev_addr[0] &= ~0x01;
448
449 netif_carrier_off(dev);
450
451 err = register_netdev(dev);
452 if (err) {
453 netdev_warn(dev, "Could not register device: err=%d\n", err);
454 free_netdev(dev);
455 return ERR_PTR(err);
456 }
457
458 netdev_dbg(dev, "Successfully created xenvif\n");
Paul Durrant279f4382013-09-17 17:46:08 +0100459
460 __module_get(THIS_MODULE);
461
Ian Campbellf942dc22011-03-15 00:06:18 +0000462 return vif;
463}
464
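/* Initialise the frontend-independent parts of a queue: credit
 * scheduler state, the internal packet queues, the pending ring and
 * the pages used for zerocopy grant mappings.
 */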
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec  = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_timeout.function = xenvif_tx_credit_callback;
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without depending on ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS,
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}

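/* Mark the vif as connected and, if the device is already running,
 * bring it up. The MTU is clamped when the frontend cannot do SG.
 */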
void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

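/* Connect a queue to its frontend: map the shared Tx/Rx rings, bind
 * the event channel(s) and start the guest-rx and dealloc kthreads.
 */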
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        queue->stalled = true;

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;
        get_task_struct(task);

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(queue);
err:
        module_put(THIS_MODULE);
        return err;
}

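/* Drop carrier and take the interface down, discarding queued
 * packets. The CONNECTED status bit makes this safe to call twice.
 */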
void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

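/* Reverse xenvif_connect() for every queue: stop the kthreads, unbind
 * the event channels and unmap the shared rings.
 */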
void xenvif_disconnect(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                netif_napi_del(&queue->napi);

                if (queue->task) {
                        kthread_stop(queue->task);
                        put_task_struct(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq)
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_rings(queue);
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

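/* Final teardown: unregister the netdev, free the queues and drop the
 * module reference taken in xenvif_alloc().
 */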
void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                xenvif_deinit_queue(queue);
        }

        vfree(vif->queues);
        vif->queues = NULL;
        vif->num_queues = 0;

        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}