/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

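/* Default length of the vif's netdev TX queue (assigned to tx_queue_len
 * in xenvif_alloc() below) and the NAPI poll weight; 64 is the
 * conventional weight used by network drivers.
 */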
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

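/* A vif is schedulable only while its netdev is up, the backend is
 * connected and the vif has not been flagged as rogue (disabled).
 */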
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

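/* TX interrupt: the frontend has produced requests on the TX ring;
 * schedule this queue's NAPI instance to consume them.
 */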
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; we pretend there is nothing to do for it
	 * so that it is descheduled from NAPI. The interface will be
	 * turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

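/* RX interrupt: there is activity on the RX ring; wake the kthread
 * that pushes queued packets to the frontend.
 */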
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

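/* Combined handler, used when the frontend has not negotiated
 * feature-split-event-channels and TX and RX share one event channel.
 */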
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;

	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;

	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

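/* Select the transmit queue for an skb. If the frontend has not
 * negotiated a hash algorithm, defer to the core's fallback selection;
 * otherwise hash the packet and, when a mapping table is configured,
 * use it to pick the queue.
 */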
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
		u16 index = fallback(dev, skb) % dev->real_num_tx_queues;

		/* Make sure there is no hash information in the socket
		 * buffer otherwise it would be incorrectly forwarded
		 * to the frontend.
		 */
		skb_clear_hash(skb);

		return index;
	}

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}

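/* Queue an skb for delivery to the frontend: pick the target queue,
 * stamp an expiry time so a stalled queue can eventually drop the
 * packet, and kick the guest-RX kthread to push it onto the ring.
 */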
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

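/* Frames larger than the standard Ethernet MTU can only be handled if
 * the frontend negotiated scatter-gather, in which case anything up to
 * the 64KiB IP datagram limit (less the VLAN/Ethernet header) is fine.
 */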
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

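/* Mask out netdev features for which the corresponding frontend
 * capability was not negotiated via xenstore.
 */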
static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

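/* One-time initialisation of a queue: credit scheduler state, packet
 * queues, the pending ring, and the pages/callback structures used for
 * zerocopy TX mapping.
 */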
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_timeout.function = xenvif_tx_credit_callback;
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just
	 * a bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

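/* Connect the control path: map the shared control ring, bind its
 * event channel, and service it with the threaded xenvif_ctrl_irq_fn
 * handler.
 */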
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

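/* Connect the data path of a single queue: map the shared TX/RX rings,
 * bind the event channel(s), and start the guest-RX and dealloc
 * kthreads.
 */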
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
		       XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	module_put(THIS_MODULE);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

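/* Tear down the data path of every queue: drop the carrier, stop the
 * kthreads and NAPI, unbind event channels, and unmap the shared rings.
 */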
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}