Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Network-device interface management. |
| 3 | * |
| 4 | * Copyright (c) 2004-2005, Keir Fraser |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License version 2 |
| 8 | * as published by the Free Software Foundation; or, when distributed |
| 9 | * separately from the Linux kernel or incorporated into other |
| 10 | * software packages, subject to the following license: |
| 11 | * |
| 12 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 13 | * of this source file (the "Software"), to deal in the Software without |
| 14 | * restriction, including without limitation the rights to use, copy, modify, |
| 15 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
| 16 | * and to permit persons to whom the Software is furnished to do so, subject to |
| 17 | * the following conditions: |
| 18 | * |
| 19 | * The above copyright notice and this permission notice shall be included in |
| 20 | * all copies or substantial portions of the Software. |
| 21 | * |
| 22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 25 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 26 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| 28 | * IN THE SOFTWARE. |
| 29 | */ |
| 30 | |
| 31 | #include "common.h" |
| 32 | |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 33 | #include <linux/kthread.h> |
Ingo Molnar | 0881e7b | 2017-02-05 15:30:50 +0100 | [diff] [blame] | 34 | #include <linux/sched/task.h> |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 35 | #include <linux/ethtool.h> |
| 36 | #include <linux/rtnetlink.h> |
| 37 | #include <linux/if_vlan.h> |
Arnd Bergmann | e7b599d | 2014-06-10 10:34:36 +0200 | [diff] [blame] | 38 | #include <linux/vmalloc.h> |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 39 | |
| 40 | #include <xen/events.h> |
| 41 | #include <asm/xen/hypercall.h> |
Zoltan Kiss | f53c3fe | 2014-03-06 21:48:26 +0000 | [diff] [blame] | 42 | #include <xen/balloon.h> |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 43 | |
| 44 | #define XENVIF_QUEUE_LENGTH 32 |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 45 | #define XENVIF_NAPI_WEIGHT 64 |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 46 | |
David Vrabel | f48da8b | 2014-10-22 14:08:54 +0100 | [diff] [blame] | 47 | /* Number of bytes allowed on the internal guest Rx queue. */ |
| 48 | #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) |
| 49 | |
Wei Liu | a64bd93 | 2014-08-12 11:48:07 +0100 | [diff] [blame] | 50 | /* This function is used to set SKBTX_DEV_ZEROCOPY as well as to |
| 51 | * increase the inflight counter. We need to increase the inflight |
| 52 | * counter because the core driver calls into xenvif_zerocopy_callback, |
| 53 | * which calls xenvif_skb_zerocopy_complete. |
| 54 | */ |
| 55 | void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, |
| 56 | struct sk_buff *skb) |
| 57 | { |
| 58 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
| 59 | atomic_inc(&queue->inflight_packets); |
| 60 | } |
| 61 | |
| 62 | void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) |
| 63 | { |
| 64 | atomic_dec(&queue->inflight_packets); |
Ross Lagerwall | 57b2290 | 2015-08-04 15:40:59 +0100 | [diff] [blame] | 65 | |
| 66 | /* Wake the dealloc thread _after_ decrementing inflight_packets so |
| 67 | * that if kthread_stop() has already been called, the dealloc thread |
| 68 | * does not wait forever with nothing to wake it. |
| 69 | */ |
| 70 | wake_up(&queue->dealloc_wq); |
Wei Liu | a64bd93 | 2014-08-12 11:48:07 +0100 | [diff] [blame] | 71 | } |
| 72 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 73 | int xenvif_schedulable(struct xenvif *vif) |
| 74 | { |
Zoltan Kiss | 3d1af1d | 2014-08-04 16:20:57 +0100 | [diff] [blame] | 75 | return netif_running(vif->dev) && |
David Vrabel | f48da8b | 2014-10-22 14:08:54 +0100 | [diff] [blame] | 76 | test_bit(VIF_STATUS_CONNECTED, &vif->status) && |
| 77 | !vif->disabled; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 78 | } |
| 79 | |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 80 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 81 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 82 | struct xenvif_queue *queue = dev_id; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 83 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 84 | if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) |
| 85 | napi_schedule(&queue->napi); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 86 | |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 87 | return IRQ_HANDLED; |
| 88 | } |
| 89 | |
Lad, Prabhakar | 38741d5 | 2015-02-05 13:38:07 +0000 | [diff] [blame] | 90 | static int xenvif_poll(struct napi_struct *napi, int budget) |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 91 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 92 | struct xenvif_queue *queue = |
| 93 | container_of(napi, struct xenvif_queue, napi); |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 94 | int work_done; |
| 95 | |
Wei Liu | e9d8b2c | 2014-04-01 12:46:12 +0100 | [diff] [blame] | 96 | /* This vif is rogue, we pretend there is nothing to do |
| 97 | * for this vif to deschedule it from NAPI. But this interface |
| 98 | * will be turned off in thread context later. |
| 99 | */ |
Zoltan Kiss | 2561cc1 | 2014-08-11 13:01:44 +0100 | [diff] [blame] | 100 | if (unlikely(queue->vif->disabled)) { |
Wei Liu | e9d8b2c | 2014-04-01 12:46:12 +0100 | [diff] [blame] | 101 | napi_complete(napi); |
| 102 | return 0; |
| 103 | } |
| 104 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 105 | work_done = xenvif_tx_action(queue, budget); |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 106 | |
| 107 | if (work_done < budget) { |
Eric Dumazet | 6ad2016 | 2017-01-30 08:22:01 -0800 | [diff] [blame] | 108 | napi_complete_done(napi, work_done); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 109 | xenvif_napi_schedule_or_enable_events(queue); |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 110 | } |
| 111 | |
| 112 | return work_done; |
| 113 | } |
| 114 | |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 115 | static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) |
| 116 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 117 | struct xenvif_queue *queue = dev_id; |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 118 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 119 | xenvif_kick_thread(queue); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 120 | |
| 121 | return IRQ_HANDLED; |
| 122 | } |
| 123 | |
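| | /* Combined interrupt handler, used when the frontend does not request |
| | * split event channels: a single interrupt services both TX and RX. |
| | */ |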
Zoltan Kiss | f51de24 | 2014-07-08 19:49:14 +0100 | [diff] [blame] | 124 | irqreturn_t xenvif_interrupt(int irq, void *dev_id) |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 125 | { |
| 126 | xenvif_tx_interrupt(irq, dev_id); |
| 127 | xenvif_rx_interrupt(irq, dev_id); |
| 128 | |
| 129 | return IRQ_HANDLED; |
| 130 | } |
| 131 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 132 | int xenvif_queue_stopped(struct xenvif_queue *queue) |
Zoltan Kiss | 0935078 | 2014-03-06 21:48:30 +0000 | [diff] [blame] | 133 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 134 | struct net_device *dev = queue->vif->dev; |
| 135 | unsigned int id = queue->id; |
| 136 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id)); |
| 137 | } |
Zoltan Kiss | 0935078 | 2014-03-06 21:48:30 +0000 | [diff] [blame] | 138 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 139 | void xenvif_wake_queue(struct xenvif_queue *queue) |
| 140 | { |
| 141 | struct net_device *dev = queue->vif->dev; |
| 142 | unsigned int id = queue->id; |
| 143 | netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); |
| 144 | } |
| 145 | |
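| | /* Select the transmit queue for an skb. With no hash algorithm |
| | * configured this defers to the core fallback; otherwise the packet |
| | * hash is computed and, when the frontend has supplied a mapping |
| | * table, used to index it. |
| | */ |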
Paul Durrant | 40d8abd | 2016-05-13 09:37:27 +0100 | [diff] [blame] | 146 | static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 147 | void *accel_priv, |
| 148 | select_queue_fallback_t fallback) |
| 149 | { |
| 150 | struct xenvif *vif = netdev_priv(dev); |
| 151 | unsigned int size = vif->hash.size; |
| 152 | |
Paul Durrant | 912e27e | 2016-10-07 09:32:31 +0100 | [diff] [blame] | 153 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) |
| 154 | return fallback(dev, skb) % dev->real_num_tx_queues; |
Paul Durrant | 40d8abd | 2016-05-13 09:37:27 +0100 | [diff] [blame] | 155 | |
| 156 | xenvif_set_skb_hash(vif, skb); |
| 157 | |
| 158 | if (size == 0) |
| 159 | return skb_get_hash_raw(skb) % dev->real_num_tx_queues; |
| 160 | |
| 161 | return vif->hash.mapping[skb_get_hash_raw(skb) % size]; |
| 162 | } |
| 163 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 164 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 165 | { |
| 166 | struct xenvif *vif = netdev_priv(dev); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 167 | struct xenvif_queue *queue = NULL; |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 168 | unsigned int num_queues = vif->num_queues; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 169 | u16 index; |
David Vrabel | f48da8b | 2014-10-22 14:08:54 +0100 | [diff] [blame] | 170 | struct xenvif_rx_cb *cb; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 171 | |
| 172 | BUG_ON(skb->dev != dev); |
| 173 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 174 | /* Drop the packet if queues are not set up */ |
| 175 | if (num_queues < 1) |
| 176 | goto drop; |
| 177 | |
| 178 | /* Obtain the queue to be used to transmit this packet */ |
| 179 | index = skb_get_queue_mapping(skb); |
| 180 | if (index >= num_queues) { |
| 181 | pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n", |
| 182 | index, vif->dev->name); |
| 183 | index %= num_queues; |
| 184 | } |
| 185 | queue = &vif->queues[index]; |
| 186 | |
| 187 | /* Drop the packet if queue is not ready */ |
| 188 | if (queue->task == NULL || |
| 189 | queue->dealloc_task == NULL || |
Zoltan Kiss | f53c3fe | 2014-03-06 21:48:26 +0000 | [diff] [blame] | 190 | !xenvif_schedulable(vif)) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 191 | goto drop; |
| 192 | |
Paul Durrant | 210c34d | 2015-09-02 17:58:36 +0100 | [diff] [blame] | 193 | if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) { |
| 194 | struct ethhdr *eth = (struct ethhdr *)skb->data; |
| 195 | |
| 196 | if (!xenvif_mcast_match(vif, eth->h_dest)) |
| 197 | goto drop; |
| 198 | } |
| 199 | |
David Vrabel | f48da8b | 2014-10-22 14:08:54 +0100 | [diff] [blame] | 200 | cb = XENVIF_RX_CB(skb); |
David Vrabel | 26c0e10 | 2014-12-18 11:13:06 +0000 | [diff] [blame] | 201 | cb->expires = jiffies + vif->drain_timeout; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 202 | |
Paul Durrant | 912e27e | 2016-10-07 09:32:31 +0100 | [diff] [blame] | 203 | /* If there is no hash algorithm configured, make sure there is |
| 204 | * no hash information in the socket buffer; otherwise it |
| 205 | * would be incorrectly forwarded to the frontend. |
| 206 | */ |
| 207 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) |
| 208 | skb_clear_hash(skb); |
| 209 | |
David Vrabel | f48da8b | 2014-10-22 14:08:54 +0100 | [diff] [blame] | 210 | xenvif_rx_queue_tail(queue, skb); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 211 | xenvif_kick_thread(queue); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 212 | |
| 213 | return NETDEV_TX_OK; |
| 214 | |
| 215 | drop: |
| 216 | vif->dev->stats.tx_dropped++; |
| 217 | dev_kfree_skb(skb); |
| 218 | return NETDEV_TX_OK; |
| 219 | } |
| 220 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 221 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) |
| 222 | { |
| 223 | struct xenvif *vif = netdev_priv(dev); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 224 | struct xenvif_queue *queue = NULL; |
Mart van Santen | ebf692f | 2017-02-10 12:02:18 +0000 | [diff] [blame] | 225 | u64 rx_bytes = 0; |
| 226 | u64 rx_packets = 0; |
| 227 | u64 tx_bytes = 0; |
| 228 | u64 tx_packets = 0; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 229 | unsigned int index; |
| 230 | |
Igor Druzhinin | f16f1df | 2017-01-17 20:49:38 +0000 | [diff] [blame] | 231 | spin_lock(&vif->lock); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 232 | if (vif->queues == NULL) |
| 233 | goto out; |
| 234 | |
| 235 | /* Aggregate tx and rx stats from each queue */ |
Igor Druzhinin | f16f1df | 2017-01-17 20:49:38 +0000 | [diff] [blame] | 236 | for (index = 0; index < vif->num_queues; ++index) { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 237 | queue = &vif->queues[index]; |
| 238 | rx_bytes += queue->stats.rx_bytes; |
| 239 | rx_packets += queue->stats.rx_packets; |
| 240 | tx_bytes += queue->stats.tx_bytes; |
| 241 | tx_packets += queue->stats.tx_packets; |
| 242 | } |
| 243 | |
| 244 | out: |
Igor Druzhinin | f16f1df | 2017-01-17 20:49:38 +0000 | [diff] [blame] | 245 | spin_unlock(&vif->lock); |
| 246 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 247 | vif->dev->stats.rx_bytes = rx_bytes; |
| 248 | vif->dev->stats.rx_packets = rx_packets; |
| 249 | vif->dev->stats.tx_bytes = tx_bytes; |
| 250 | vif->dev->stats.tx_packets = tx_packets; |
| 251 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 252 | return &vif->dev->stats; |
| 253 | } |
| 254 | |
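| | /* Enable NAPI and the TX/RX interrupts on every queue, then kick each |
| | * queue so that work which arrived while the interface was down is |
| | * picked up. |
| | */ |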
| 255 | static void xenvif_up(struct xenvif *vif) |
| 256 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 257 | struct xenvif_queue *queue = NULL; |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 258 | unsigned int num_queues = vif->num_queues; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 259 | unsigned int queue_index; |
| 260 | |
| 261 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 262 | queue = &vif->queues[queue_index]; |
| 263 | napi_enable(&queue->napi); |
| 264 | enable_irq(queue->tx_irq); |
| 265 | if (queue->tx_irq != queue->rx_irq) |
| 266 | enable_irq(queue->rx_irq); |
| 267 | xenvif_napi_schedule_or_enable_events(queue); |
| 268 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 269 | } |
| 270 | |
| 271 | static void xenvif_down(struct xenvif *vif) |
| 272 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 273 | struct xenvif_queue *queue = NULL; |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 274 | unsigned int num_queues = vif->num_queues; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 275 | unsigned int queue_index; |
| 276 | |
| 277 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 278 | queue = &vif->queues[queue_index]; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 279 | disable_irq(queue->tx_irq); |
| 280 | if (queue->tx_irq != queue->rx_irq) |
| 281 | disable_irq(queue->rx_irq); |
Zoltan Kiss | 8fe7898 | 2014-10-28 15:29:30 +0000 | [diff] [blame] | 282 | napi_disable(&queue->napi); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 283 | del_timer_sync(&queue->credit_timeout); |
| 284 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 285 | } |
| 286 | |
| 287 | static int xenvif_open(struct net_device *dev) |
| 288 | { |
| 289 | struct xenvif *vif = netdev_priv(dev); |
Zoltan Kiss | 3d1af1d | 2014-08-04 16:20:57 +0100 | [diff] [blame] | 290 | if (test_bit(VIF_STATUS_CONNECTED, &vif->status)) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 291 | xenvif_up(vif); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 292 | netif_tx_start_all_queues(dev); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 293 | return 0; |
| 294 | } |
| 295 | |
| 296 | static int xenvif_close(struct net_device *dev) |
| 297 | { |
| 298 | struct xenvif *vif = netdev_priv(dev); |
Zoltan Kiss | 3d1af1d | 2014-08-04 16:20:57 +0100 | [diff] [blame] | 299 | if (test_bit(VIF_STATUS_CONNECTED, &vif->status)) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 300 | xenvif_down(vif); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 301 | netif_tx_stop_all_queues(dev); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 302 | return 0; |
| 303 | } |
| 304 | |
| 305 | static int xenvif_change_mtu(struct net_device *dev, int mtu) |
| 306 | { |
| 307 | struct xenvif *vif = netdev_priv(dev); |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 308 | int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 309 | |
| 310 | if (mtu > max) |
| 311 | return -EINVAL; |
| 312 | dev->mtu = mtu; |
| 313 | return 0; |
| 314 | } |
| 315 | |
Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 316 | static netdev_features_t xenvif_fix_features(struct net_device *dev, |
| 317 | netdev_features_t features) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 318 | { |
| 319 | struct xenvif *vif = netdev_priv(dev); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 320 | |
Michał Mirosław | 4710304 | 2011-04-19 03:35:06 +0000 | [diff] [blame] | 321 | if (!vif->can_sg) |
| 322 | features &= ~NETIF_F_SG; |
Paul Durrant | fedbc8c | 2016-10-04 10:29:13 +0100 | [diff] [blame] | 323 | if (~(vif->gso_mask) & GSO_BIT(TCPV4)) |
Michał Mirosław | 4710304 | 2011-04-19 03:35:06 +0000 | [diff] [blame] | 324 | features &= ~NETIF_F_TSO; |
Paul Durrant | fedbc8c | 2016-10-04 10:29:13 +0100 | [diff] [blame] | 325 | if (~(vif->gso_mask) & GSO_BIT(TCPV6)) |
Paul Durrant | 82cada2 | 2013-10-16 17:50:32 +0100 | [diff] [blame] | 326 | features &= ~NETIF_F_TSO6; |
Paul Durrant | 146c8a7 | 2013-10-16 17:50:28 +0100 | [diff] [blame] | 327 | if (!vif->ip_csum) |
Michał Mirosław | 4710304 | 2011-04-19 03:35:06 +0000 | [diff] [blame] | 328 | features &= ~NETIF_F_IP_CSUM; |
Paul Durrant | 146c8a7 | 2013-10-16 17:50:28 +0100 | [diff] [blame] | 329 | if (!vif->ipv6_csum) |
| 330 | features &= ~NETIF_F_IPV6_CSUM; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 331 | |
Michał Mirosław | 4710304 | 2011-04-19 03:35:06 +0000 | [diff] [blame] | 332 | return features; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 333 | } |
| 334 | |
| 335 | static const struct xenvif_stat { |
| 336 | char name[ETH_GSTRING_LEN]; |
| 337 | u16 offset; |
| 338 | } xenvif_stats[] = { |
| 339 | { |
| 340 | "rx_gso_checksum_fixup", |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 341 | offsetof(struct xenvif_stats, rx_gso_checksum_fixup) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 342 | }, |
Zoltan Kiss | 1bb332a | 2014-03-06 21:48:28 +0000 | [diff] [blame] | 343 | /* If (sent != success + fail), there are probably packets never |
| 344 | * freed up properly! |
| 345 | */ |
| 346 | { |
| 347 | "tx_zerocopy_sent", |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 348 | offsetof(struct xenvif_stats, tx_zerocopy_sent), |
Zoltan Kiss | 1bb332a | 2014-03-06 21:48:28 +0000 | [diff] [blame] | 349 | }, |
| 350 | { |
| 351 | "tx_zerocopy_success", |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 352 | offsetof(struct xenvif_stats, tx_zerocopy_success), |
Zoltan Kiss | 1bb332a | 2014-03-06 21:48:28 +0000 | [diff] [blame] | 353 | }, |
| 354 | { |
| 355 | "tx_zerocopy_fail", |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 356 | offsetof(struct xenvif_stats, tx_zerocopy_fail) |
Zoltan Kiss | 1bb332a | 2014-03-06 21:48:28 +0000 | [diff] [blame] | 357 | }, |
Zoltan Kiss | e3377f3 | 2014-03-06 21:48:29 +0000 | [diff] [blame] | 358 | /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use |
| 359 | * a guest with the same MAX_SKB_FRAGS. |
| 360 | */ |
| 361 | { |
| 362 | "tx_frag_overflow", |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 363 | offsetof(struct xenvif_stats, tx_frag_overflow) |
Zoltan Kiss | e3377f3 | 2014-03-06 21:48:29 +0000 | [diff] [blame] | 364 | }, |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 365 | }; |
| 366 | |
| 367 | static int xenvif_get_sset_count(struct net_device *dev, int string_set) |
| 368 | { |
| 369 | switch (string_set) { |
| 370 | case ETH_SS_STATS: |
| 371 | return ARRAY_SIZE(xenvif_stats); |
| 372 | default: |
| 373 | return -EINVAL; |
| 374 | } |
| 375 | } |
| 376 | |
| 377 | static void xenvif_get_ethtool_stats(struct net_device *dev, |
| 378 | struct ethtool_stats *stats, u64 * data) |
| 379 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 380 | struct xenvif *vif = netdev_priv(dev); |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 381 | unsigned int num_queues = vif->num_queues; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 382 | int i; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 383 | unsigned int queue_index; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 384 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 385 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
| 386 | unsigned long accum = 0; |
| 387 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
David Vrabel | d63951d | 2015-03-04 11:14:46 +0000 | [diff] [blame] | 388 | void *vif_stats = &vif->queues[queue_index].stats; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 389 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
| 390 | } |
| 391 | data[i] = accum; |
| 392 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 393 | } |
| 394 | |
| 395 | static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) |
| 396 | { |
| 397 | int i; |
| 398 | |
| 399 | switch (stringset) { |
| 400 | case ETH_SS_STATS: |
| 401 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) |
| 402 | memcpy(data + i * ETH_GSTRING_LEN, |
| 403 | xenvif_stats[i].name, ETH_GSTRING_LEN); |
| 404 | break; |
| 405 | } |
| 406 | } |
| 407 | |
stephen hemminger | 813abbb | 2012-01-04 11:56:58 +0000 | [diff] [blame] | 408 | static const struct ethtool_ops xenvif_ethtool_ops = { |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 409 | .get_link = ethtool_op_get_link, |
| 410 | |
| 411 | .get_sset_count = xenvif_get_sset_count, |
| 412 | .get_ethtool_stats = xenvif_get_ethtool_stats, |
| 413 | .get_strings = xenvif_get_strings, |
| 414 | }; |
| 415 | |
stephen hemminger | 813abbb | 2012-01-04 11:56:58 +0000 | [diff] [blame] | 416 | static const struct net_device_ops xenvif_netdev_ops = { |
Paul Durrant | 40d8abd | 2016-05-13 09:37:27 +0100 | [diff] [blame] | 417 | .ndo_select_queue = xenvif_select_queue, |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 418 | .ndo_start_xmit = xenvif_start_xmit, |
| 419 | .ndo_get_stats = xenvif_get_stats, |
| 420 | .ndo_open = xenvif_open, |
| 421 | .ndo_stop = xenvif_close, |
| 422 | .ndo_change_mtu = xenvif_change_mtu, |
Michał Mirosław | 4710304 | 2011-04-19 03:35:06 +0000 | [diff] [blame] | 423 | .ndo_fix_features = xenvif_fix_features, |
Matt Wilson | 4a633a6 | 2013-01-22 08:08:25 +0000 | [diff] [blame] | 424 | .ndo_set_mac_address = eth_mac_addr, |
| 425 | .ndo_validate_addr = eth_validate_addr, |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 426 | }; |
| 427 | |
| 428 | struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, |
| 429 | unsigned int handle) |
| 430 | { |
| 431 | int err; |
| 432 | struct net_device *dev; |
| 433 | struct xenvif *vif; |
| 434 | char name[IFNAMSIZ] = {}; |
| 435 | |
| 436 | snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); |
Andrew J. Bennieston | 8d3d53b | 2014-06-04 10:30:43 +0100 | [diff] [blame] | 437 | /* Allocate a netdev with the max. supported number of queues. |
| 438 | * When the guest selects the desired number, it will be updated |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 439 | * via netif_set_real_num_*_queues(). |
Andrew J. Bennieston | 8d3d53b | 2014-06-04 10:30:43 +0100 | [diff] [blame] | 440 | */ |
Tom Gundersen | c835a67 | 2014-07-14 16:37:24 +0200 | [diff] [blame] | 441 | dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN, |
| 442 | ether_setup, xenvif_max_queues); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 443 | if (dev == NULL) { |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 444 | pr_warn("Could not allocate netdev for %s\n", name); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 445 | return ERR_PTR(-ENOMEM); |
| 446 | } |
| 447 | |
| 448 | SET_NETDEV_DEV(dev, parent); |
| 449 | |
| 450 | vif = netdev_priv(dev); |
Paul Durrant | ac3d5ac | 2013-12-23 09:27:17 +0000 | [diff] [blame] | 451 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 452 | vif->domid = domid; |
| 453 | vif->handle = handle; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 454 | vif->can_sg = 1; |
Paul Durrant | 146c8a7 | 2013-10-16 17:50:28 +0100 | [diff] [blame] | 455 | vif->ip_csum = 1; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 456 | vif->dev = dev; |
Wei Liu | e9d8b2c | 2014-04-01 12:46:12 +0100 | [diff] [blame] | 457 | vif->disabled = false; |
David Vrabel | 26c0e10 | 2014-12-18 11:13:06 +0000 | [diff] [blame] | 458 | vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs); |
| 459 | vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs); |
Wei Liu | e9d8b2c | 2014-04-01 12:46:12 +0100 | [diff] [blame] | 460 | |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 461 | /* Start out with no queues. */ |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 462 | vif->queues = NULL; |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 463 | vif->num_queues = 0; |
Zoltan Kiss | 0935078 | 2014-03-06 21:48:30 +0000 | [diff] [blame] | 464 | |
David Vrabel | ecf08d2 | 2014-10-22 14:08:55 +0100 | [diff] [blame] | 465 | spin_lock_init(&vif->lock); |
Paul Durrant | 210c34d | 2015-09-02 17:58:36 +0100 | [diff] [blame] | 466 | INIT_LIST_HEAD(&vif->fe_mcast_addr); |
David Vrabel | ecf08d2 | 2014-10-22 14:08:55 +0100 | [diff] [blame] | 467 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 468 | dev->netdev_ops = &xenvif_netdev_ops; |
Paul Durrant | 146c8a7 | 2013-10-16 17:50:28 +0100 | [diff] [blame] | 469 | dev->hw_features = NETIF_F_SG | |
| 470 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
Ross Lagerwall | 2167ca0 | 2016-10-04 10:29:18 +0100 | [diff] [blame] | 471 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST; |
Paul Durrant | 7365bcf | 2013-10-16 17:50:30 +0100 | [diff] [blame] | 472 | dev->features = dev->hw_features | NETIF_F_RXCSUM; |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 473 | dev->ethtool_ops = &xenvif_ethtool_ops; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 474 | |
| 475 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; |
| 476 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 477 | dev->min_mtu = 0; |
| 478 | dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; |
| 479 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 480 | /* |
| 481 | * Initialise a dummy MAC address. We choose the numerically |
| 482 | * largest non-broadcast address to prevent the address getting |
| 483 | * stolen by an Ethernet bridge for STP purposes. |
| 484 | * (FE:FF:FF:FF:FF:FF) |
| 485 | */ |
Joe Perches | 3b6ed26 | 2015-03-02 19:54:51 -0800 | [diff] [blame] | 486 | eth_broadcast_addr(dev->dev_addr); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 487 | dev->dev_addr[0] &= ~0x01; |
| 488 | |
| 489 | netif_carrier_off(dev); |
| 490 | |
| 491 | err = register_netdev(dev); |
| 492 | if (err) { |
| 493 | netdev_warn(dev, "Could not register device: err=%d\n", err); |
| 494 | free_netdev(dev); |
| 495 | return ERR_PTR(err); |
| 496 | } |
| 497 | |
| 498 | netdev_dbg(dev, "Successfully created xenvif\n"); |
Paul Durrant | 279f438 | 2013-09-17 17:46:08 +0100 | [diff] [blame] | 499 | |
| 500 | __module_get(THIS_MODULE); |
| 501 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 502 | return vif; |
| 503 | } |
| 504 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 505 | int xenvif_init_queue(struct xenvif_queue *queue) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 506 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 507 | int err, i; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 508 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 509 | queue->credit_bytes = queue->remaining_credit = ~0UL; |
| 510 | queue->credit_usec = 0UL; |
| 511 | init_timer(&queue->credit_timeout); |
Palik, Imre | edafc13 | 2015-03-19 11:05:42 +0100 | [diff] [blame] | 512 | queue->credit_timeout.function = xenvif_tx_credit_callback; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 513 | queue->credit_window_start = get_jiffies_64(); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 514 | |
David Vrabel | f48da8b | 2014-10-22 14:08:54 +0100 | [diff] [blame] | 515 | queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; |
| 516 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 517 | skb_queue_head_init(&queue->rx_queue); |
| 518 | skb_queue_head_init(&queue->tx_queue); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 519 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 520 | queue->pending_cons = 0; |
| 521 | queue->pending_prod = MAX_PENDING_REQS; |
| 522 | for (i = 0; i < MAX_PENDING_REQS; ++i) |
| 523 | queue->pending_ring[i] = i; |
Paul Durrant | ca2f09f | 2013-12-06 16:36:07 +0000 | [diff] [blame] | 524 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 525 | spin_lock_init(&queue->callback_lock); |
| 526 | spin_lock_init(&queue->response_lock); |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 527 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 528 | /* If ballooning is disabled, this will consume real memory, so you |
| 529 | * had better enable it. The long-term solution would be to use just a |
| 530 | * bunch of valid page descriptors, without dependency on ballooning. |
| 531 | */ |
David Vrabel | ff4b156 | 2015-01-08 18:06:01 +0000 | [diff] [blame] | 532 | err = gnttab_alloc_pages(MAX_PENDING_REQS, |
| 533 | queue->mmap_pages); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 534 | if (err) { |
| 535 | netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); |
| 536 | return -ENOMEM; |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 537 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 538 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 539 | for (i = 0; i < MAX_PENDING_REQS; i++) { |
| 540 | queue->pending_tx_info[i].callback_struct = (struct ubuf_info) |
| 541 | { .callback = xenvif_zerocopy_callback, |
| 542 | .ctx = NULL, |
| 543 | .desc = i }; |
| 544 | queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 545 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 546 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 547 | return 0; |
| 548 | } |
Zoltan Kiss | f53c3fe | 2014-03-06 21:48:26 +0000 | [diff] [blame] | 549 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 550 | void xenvif_carrier_on(struct xenvif *vif) |
| 551 | { |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 552 | rtnl_lock(); |
Michał Mirosław | 4710304 | 2011-04-19 03:35:06 +0000 | [diff] [blame] | 553 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
| 554 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
| 555 | netdev_update_features(vif->dev); |
Zoltan Kiss | 3d1af1d | 2014-08-04 16:20:57 +0100 | [diff] [blame] | 556 | set_bit(VIF_STATUS_CONNECTED, &vif->status); |
David Vrabel | d0e5d83 | 2011-09-30 06:37:51 +0000 | [diff] [blame] | 557 | if (netif_running(vif->dev)) |
| 558 | xenvif_up(vif); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 559 | rtnl_unlock(); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 560 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 561 | |
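| | /* Map the control ring granted by the frontend, initialise the hash |
| | * state and attach a threaded handler to the control event channel. |
| | */ |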
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 562 | int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, |
| 563 | unsigned int evtchn) |
| 564 | { |
| 565 | struct net_device *dev = vif->dev; |
| 566 | void *addr; |
| 567 | struct xen_netif_ctrl_sring *shared; |
Juergen Gross | 0364a88 | 2016-09-22 11:06:25 +0200 | [diff] [blame] | 568 | int err; |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 569 | |
| 570 | err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), |
| 571 | &ring_ref, 1, &addr); |
| 572 | if (err) |
| 573 | goto err; |
| 574 | |
| 575 | shared = (struct xen_netif_ctrl_sring *)addr; |
| 576 | BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE); |
| 577 | |
Juergen Gross | 0364a88 | 2016-09-22 11:06:25 +0200 | [diff] [blame] | 578 | err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn); |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 579 | if (err < 0) |
| 580 | goto err_unmap; |
| 581 | |
| 582 | vif->ctrl_irq = err; |
| 583 | |
Paul Durrant | 40d8abd | 2016-05-13 09:37:27 +0100 | [diff] [blame] | 584 | xenvif_init_hash(vif); |
| 585 | |
Juergen Gross | 0364a88 | 2016-09-22 11:06:25 +0200 | [diff] [blame] | 586 | err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn, |
| 587 | IRQF_ONESHOT, "xen-netback-ctrl", vif); |
| 588 | if (err) { |
| 589 | pr_warn("Could not setup irq handler for %s\n", dev->name); |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 590 | goto err_deinit; |
| 591 | } |
| 592 | |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 593 | return 0; |
| 594 | |
| 595 | err_deinit: |
Paul Durrant | 40d8abd | 2016-05-13 09:37:27 +0100 | [diff] [blame] | 596 | xenvif_deinit_hash(vif); |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 597 | unbind_from_irqhandler(vif->ctrl_irq, vif); |
| 598 | vif->ctrl_irq = 0; |
| 599 | |
| 600 | err_unmap: |
| 601 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), |
| 602 | vif->ctrl.sring); |
| 603 | vif->ctrl.sring = NULL; |
| 604 | |
| 605 | err: |
| 606 | return err; |
| 607 | } |
| 608 | |
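| | /* Map the frontend's TX/RX rings for this queue, register NAPI, bind |
| | * the (shared or split) event channels and start the per-queue |
| | * guest-rx and dealloc kthreads. |
| | */ |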
| 609 | int xenvif_connect_data(struct xenvif_queue *queue, |
| 610 | unsigned long tx_ring_ref, |
| 611 | unsigned long rx_ring_ref, |
| 612 | unsigned int tx_evtchn, |
| 613 | unsigned int rx_evtchn) |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 614 | { |
| 615 | struct task_struct *task; |
| 616 | int err = -ENOMEM; |
| 617 | |
| 618 | BUG_ON(queue->tx_irq); |
| 619 | BUG_ON(queue->task); |
| 620 | BUG_ON(queue->dealloc_task); |
| 621 | |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 622 | err = xenvif_map_frontend_data_rings(queue, tx_ring_ref, |
| 623 | rx_ring_ref); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 624 | if (err < 0) |
| 625 | goto err; |
| 626 | |
| 627 | init_waitqueue_head(&queue->wq); |
| 628 | init_waitqueue_head(&queue->dealloc_wq); |
Wei Liu | a64bd93 | 2014-08-12 11:48:07 +0100 | [diff] [blame] | 629 | atomic_set(&queue->inflight_packets, 0); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 630 | |
Wei Liu | e24f819 | 2014-08-25 16:44:00 +0100 | [diff] [blame] | 631 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, |
| 632 | XENVIF_NAPI_WEIGHT); |
| 633 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 634 | if (tx_evtchn == rx_evtchn) { |
| 635 | /* feature-split-event-channels == 0 */ |
| 636 | err = bind_interdomain_evtchn_to_irqhandler( |
| 637 | queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, |
| 638 | queue->name, queue); |
| 639 | if (err < 0) |
| 640 | goto err_unmap; |
| 641 | queue->tx_irq = queue->rx_irq = err; |
| 642 | disable_irq(queue->tx_irq); |
| 643 | } else { |
| 644 | /* feature-split-event-channels == 1 */ |
| 645 | snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), |
| 646 | "%s-tx", queue->name); |
| 647 | err = bind_interdomain_evtchn_to_irqhandler( |
| 648 | queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, |
| 649 | queue->tx_irq_name, queue); |
| 650 | if (err < 0) |
| 651 | goto err_unmap; |
| 652 | queue->tx_irq = err; |
| 653 | disable_irq(queue->tx_irq); |
| 654 | |
| 655 | snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), |
| 656 | "%s-rx", queue->name); |
| 657 | err = bind_interdomain_evtchn_to_irqhandler( |
| 658 | queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, |
| 659 | queue->rx_irq_name, queue); |
| 660 | if (err < 0) |
| 661 | goto err_tx_unbind; |
| 662 | queue->rx_irq = err; |
| 663 | disable_irq(queue->rx_irq); |
| 664 | } |
| 665 | |
David Vrabel | ecf08d2 | 2014-10-22 14:08:55 +0100 | [diff] [blame] | 666 | queue->stalled = true; |
| 667 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 668 | task = kthread_create(xenvif_kthread_guest_rx, |
| 669 | (void *)queue, "%s-guest-rx", queue->name); |
| 670 | if (IS_ERR(task)) { |
| 671 | pr_warn("Could not allocate kthread for %s\n", queue->name); |
| 672 | err = PTR_ERR(task); |
| 673 | goto err_rx_unbind; |
| 674 | } |
| 675 | queue->task = task; |
David Vrabel | 42b5212 | 2015-02-02 16:57:51 +0000 | [diff] [blame] | 676 | get_task_struct(task); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 677 | |
| 678 | task = kthread_create(xenvif_dealloc_kthread, |
| 679 | (void *)queue, "%s-dealloc", queue->name); |
| 680 | if (IS_ERR(task)) { |
| 681 | pr_warn("Could not allocate kthread for %s\n", queue->name); |
| 682 | err = PTR_ERR(task); |
| 683 | goto err_rx_unbind; |
| 684 | } |
| 685 | queue->dealloc_task = task; |
| 686 | |
| 687 | wake_up_process(queue->task); |
| 688 | wake_up_process(queue->dealloc_task); |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 689 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 690 | return 0; |
Wei Liu | b3f980b | 2013-08-26 12:59:38 +0100 | [diff] [blame] | 691 | |
| 692 | err_rx_unbind: |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 693 | unbind_from_irqhandler(queue->rx_irq, queue); |
| 694 | queue->rx_irq = 0; |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 695 | err_tx_unbind: |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 696 | unbind_from_irqhandler(queue->tx_irq, queue); |
| 697 | queue->tx_irq = 0; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 698 | err_unmap: |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 699 | xenvif_unmap_frontend_data_rings(queue); |
David Vrabel | 4a65852 | 2016-01-15 14:55:35 +0000 | [diff] [blame] | 700 | netif_napi_del(&queue->napi); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 701 | err: |
Wei Liu | b103f35 | 2013-05-16 23:26:11 +0000 | [diff] [blame] | 702 | module_put(THIS_MODULE); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 703 | return err; |
| 704 | } |
| 705 | |
Ian Campbell | 48856286 | 2013-02-06 23:41:35 +0000 | [diff] [blame] | 706 | void xenvif_carrier_off(struct xenvif *vif) |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 707 | { |
| 708 | struct net_device *dev = vif->dev; |
Ian Campbell | 48856286 | 2013-02-06 23:41:35 +0000 | [diff] [blame] | 709 | |
| 710 | rtnl_lock(); |
Zoltan Kiss | 3d1af1d | 2014-08-04 16:20:57 +0100 | [diff] [blame] | 711 | if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) { |
| 712 | netif_carrier_off(dev); /* discard queued packets */ |
| 713 | if (netif_running(dev)) |
| 714 | xenvif_down(vif); |
| 715 | } |
Ian Campbell | 48856286 | 2013-02-06 23:41:35 +0000 | [diff] [blame] | 716 | rtnl_unlock(); |
Ian Campbell | 48856286 | 2013-02-06 23:41:35 +0000 | [diff] [blame] | 717 | } |
| 718 | |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 719 | void xenvif_disconnect_data(struct xenvif *vif) |
Ian Campbell | 48856286 | 2013-02-06 23:41:35 +0000 | [diff] [blame] | 720 | { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 721 | struct xenvif_queue *queue = NULL; |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 722 | unsigned int num_queues = vif->num_queues; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 723 | unsigned int queue_index; |
| 724 | |
Zoltan Kiss | 3d1af1d | 2014-08-04 16:20:57 +0100 | [diff] [blame] | 725 | xenvif_carrier_off(vif); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 726 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 727 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 728 | queue = &vif->queues[queue_index]; |
David Vrabel | db739ef | 2013-11-21 15:26:09 +0000 | [diff] [blame] | 729 | |
Wei Liu | ea2c5e1 | 2014-08-12 11:48:06 +0100 | [diff] [blame] | 730 | netif_napi_del(&queue->napi); |
| 731 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 732 | if (queue->task) { |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 733 | kthread_stop(queue->task); |
David Vrabel | 42b5212 | 2015-02-02 16:57:51 +0000 | [diff] [blame] | 734 | put_task_struct(queue->task); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 735 | queue->task = NULL; |
Wei Liu | e1f00a69 | 2013-05-22 06:34:45 +0000 | [diff] [blame] | 736 | } |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 737 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 738 | if (queue->dealloc_task) { |
| 739 | kthread_stop(queue->dealloc_task); |
| 740 | queue->dealloc_task = NULL; |
| 741 | } |
| 742 | |
| 743 | if (queue->tx_irq) { |
| 744 | if (queue->tx_irq == queue->rx_irq) |
| 745 | unbind_from_irqhandler(queue->tx_irq, queue); |
| 746 | else { |
| 747 | unbind_from_irqhandler(queue->tx_irq, queue); |
| 748 | unbind_from_irqhandler(queue->rx_irq, queue); |
| 749 | } |
| 750 | queue->tx_irq = 0; |
| 751 | } |
| 752 | |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 753 | xenvif_unmap_frontend_data_rings(queue); |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 754 | } |
Paul Durrant | 210c34d | 2015-09-02 17:58:36 +0100 | [diff] [blame] | 755 | |
| 756 | xenvif_mcast_addr_list_free(vif); |
Paul Durrant | 279f438 | 2013-09-17 17:46:08 +0100 | [diff] [blame] | 757 | } |
| 758 | |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 759 | void xenvif_disconnect_ctrl(struct xenvif *vif) |
| 760 | { |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 761 | if (vif->ctrl_irq) { |
Paul Durrant | c0fcded | 2016-05-18 15:55:42 +0100 | [diff] [blame] | 762 | xenvif_deinit_hash(vif); |
Paul Durrant | 4e15ee2 | 2016-05-13 09:37:26 +0100 | [diff] [blame] | 763 | unbind_from_irqhandler(vif->ctrl_irq, vif); |
| 764 | vif->ctrl_irq = 0; |
| 765 | } |
| 766 | |
| 767 | if (vif->ctrl.sring) { |
| 768 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), |
| 769 | vif->ctrl.sring); |
| 770 | vif->ctrl.sring = NULL; |
| 771 | } |
| 772 | } |
| 773 | |
Andrew J. Bennieston | 8d3d53b | 2014-06-04 10:30:43 +0100 | [diff] [blame] | 774 | /* Reverse the relevant parts of xenvif_init_queue(). |
| 775 | * Used for queue teardown from xenvif_free(), and on the |
| 776 | * error handling paths in xenbus.c:connect(). |
| 777 | */ |
| 778 | void xenvif_deinit_queue(struct xenvif_queue *queue) |
| 779 | { |
David Vrabel | ff4b156 | 2015-01-08 18:06:01 +0000 | [diff] [blame] | 780 | gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages); |
Andrew J. Bennieston | 8d3d53b | 2014-06-04 10:30:43 +0100 | [diff] [blame] | 781 | } |
| 782 | |
Paul Durrant | 279f438 | 2013-09-17 17:46:08 +0100 | [diff] [blame] | 783 | void xenvif_free(struct xenvif *vif) |
| 784 | { |
David Vrabel | 9c6f3ff | 2016-01-15 14:55:36 +0000 | [diff] [blame] | 785 | struct xenvif_queue *queues = vif->queues; |
Wei Liu | f7b50c4 | 2014-06-23 10:50:17 +0100 | [diff] [blame] | 786 | unsigned int num_queues = vif->num_queues; |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 787 | unsigned int queue_index; |
Zoltan Kiss | f53c3fe | 2014-03-06 21:48:26 +0000 | [diff] [blame] | 788 | |
Wei Liu | e9ce7cb | 2014-06-04 10:30:42 +0100 | [diff] [blame] | 789 | unregister_netdev(vif->dev); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 790 | free_netdev(vif->dev); |
Wei Liu | b103f35 | 2013-05-16 23:26:11 +0000 | [diff] [blame] | 791 | |
David Vrabel | 9c6f3ff | 2016-01-15 14:55:36 +0000 | [diff] [blame] | 792 | for (queue_index = 0; queue_index < num_queues; ++queue_index) |
| 793 | xenvif_deinit_queue(&queues[queue_index]); |
| 794 | vfree(queues); |
| 795 | |
Paul Durrant | 279f438 | 2013-09-17 17:46:08 +0100 | [diff] [blame] | 796 | module_put(THIS_MODULE); |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 797 | } |