/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
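
/* Illustrative sizing (assuming the common 256-entry xen_netif RX ring and
 * 4 KiB pages): XENVIF_RX_QUEUE_BYTES = 256/2 * 4096 = 512 KiB of guest Rx
 * traffic may be queued internally before further packets are dropped.
 */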

/* This function sets SKBTX_DEV_ZEROCOPY as well as increasing the inflight
 * counter. The counter must be increased because the core driver calls into
 * xenvif_zerocopy_callback, which in turn calls
 * xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}
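
/* The two helpers above must stay balanced: each call to
 * xenvif_skb_zerocopy_prepare() is eventually matched by a call to
 * xenvif_skb_zerocopy_complete(), because the dealloc kthread will not
 * terminate until inflight_packets has dropped back to zero.
 */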

int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue; we pretend there is nothing to do for it in
         * order to deschedule it from NAPI. The interface will be turned
         * off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}
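
/* NAPI contract, restated: returning fewer than 'budget' completed units
 * means the queue is idle, so the poller calls napi_complete() and then
 * xenvif_napi_schedule_or_enable_events() to re-arm frontend notifications
 * (or re-schedule itself immediately if new requests raced in). Returning
 * the full budget keeps this NAPI instance scheduled with events masked.
 */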

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;

        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;

        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return fallback(dev, skb) % dev->real_num_tx_queues;

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}
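
/* Worked example with made-up numbers: given a frontend-supplied mapping
 * table of size 4 holding {2, 0, 3, 1}, a packet whose raw hash is
 * 0x1000005 gives 0x1000005 % 4 == 1, so it is steered to mapping[1],
 * i.e. hardware queue 0. Only when no table is configured (size == 0)
 * does the raw hash index real_num_tx_queues directly.
 */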

static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        /* If there is no hash algorithm configured then make sure there
         * is no hash information in the socket buffer, otherwise it
         * would be incorrectly forwarded to the frontend.
         */
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}
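
/* For reference, using the standard kernel constants (defined elsewhere):
 * ETH_MAX_MTU is 65535 and VLAN_ETH_HLEN is 18, so an SG-capable vif
 * accepts MTUs up to 65535 - 18 = 65517, while a vif without SG support
 * is capped at ETH_DATA_LEN (1500).
 */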

static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}
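
/* Illustration under an assumed frontend configuration: a guest that
 * advertises TCPv4 GSO but not TCPv6 leaves gso_mask == GSO_BIT(TCPV4);
 * then ~gso_mask & GSO_BIT(TCPV6) is non-zero, so NETIF_F_TSO6 is cleared
 * while NETIF_F_TSO is kept.
 */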

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;

                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;

                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        dev->min_mtu = 0;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}
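
/* Rough lifecycle (a sketch; the precise sequencing lives in xenbus.c):
 * the backend probe calls xenvif_alloc(), and once the frontend has
 * negotiated a queue count each queue goes through xenvif_init_queue()
 * and xenvif_connect_data(), with the optional control ring attached via
 * xenvif_connect_ctrl(). Teardown uses the xenvif_disconnect_*() and
 * xenvif_free() paths below.
 */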

int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_timeout.function = xenvif_tx_credit_callback;
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without dependency on
         * ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS, queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}
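
/* Note on the credit initialisation above: credit_bytes and
 * remaining_credit start at ~0UL with credit_usec == 0, which effectively
 * disables TX rate limiting until the toolstack configures a real credit
 * window via xenbus.
 */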

void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn)
{
        struct net_device *dev = vif->dev;
        void *addr;
        struct xen_netif_ctrl_sring *shared;
        int err;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     &ring_ref, 1, &addr);
        if (err)
                goto err;

        shared = (struct xen_netif_ctrl_sring *)addr;
        BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

        err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
        if (err < 0)
                goto err_unmap;

        vif->ctrl_irq = err;

        xenvif_init_hash(vif);

        err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
                                   IRQF_ONESHOT, "xen-netback-ctrl", vif);
        if (err) {
                pr_warn("Could not setup irq handler for %s\n", dev->name);
                goto err_deinit;
        }

        return 0;

err_deinit:
        xenvif_deinit_hash(vif);
        unbind_from_irqhandler(vif->ctrl_irq, vif);
        vif->ctrl_irq = 0;

err_unmap:
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                vif->ctrl.sring);
        vif->ctrl.sring = NULL;

err:
        return err;
}

int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
                                             rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        queue->stalled = true;

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;
        get_task_struct(task);

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                netif_napi_del(&queue->napi);

                if (queue->task) {
                        kthread_stop(queue->task);
                        put_task_struct(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq) {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        } else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_data_rings(queue);
        }

        xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
        if (vif->ctrl_irq) {
                xenvif_deinit_hash(vif);
                unbind_from_irqhandler(vif->ctrl_irq, vif);
                vif->ctrl_irq = 0;
        }

        if (vif->ctrl.sring) {
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->ctrl.sring);
                vif->ctrl.sring = NULL;
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queues = vif->queues;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index)
                xenvif_deinit_queue(&queues[queue_index]);
        vfree(queues);

        module_put(THIS_MODULE);
}