/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <asm/xen/page.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)

struct netfront_stats {
	u64			rx_packets;
	u64			tx_packets;
	u64			rx_bytes;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

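/*
 * One instance of this lives per cpu (see the __percpu pointer in
 * struct netfront_info below); u64_stats_sync lets readers fetch
 * consistent 64-bit counters even on 32-bit hosts, and
 * xennet_get_stats64() folds the per-cpu copies into one total.
 */
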
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */

	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];

	/* Statistics */
	struct netfront_stats __percpu *stats;

	unsigned long rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}

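/*
 * A sketch of the tagging trick used above: freelist ids are small
 * integers (at most NET_TX_RING_SIZE), while any real sk_buff lives in
 * the kernel's direct mapping at or above PAGE_OFFSET (e.g. a lowmem
 * pointer on x86_64 looks like 0xffff8800xxxxxxxx). Comparing the raw
 * value against PAGE_OFFSET therefore distinguishes the two cases
 * without a separate tag field.
 */
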
/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

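/*
 * Usage sketch for the helpers above, mirroring what the tx path does:
 *
 *	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 *	np->tx_skbs[id].skb = skb;	/- slot now holds a pointer -/
 *	...
 *	add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 *
 * The freelist behaves as a LIFO stack threaded through the array.
 */
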
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

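/*
 * NET_RX_RING_SIZE is a power of two, so the mask above is a cheap
 * equivalent of idx % NET_RX_RING_SIZE: e.g. with 256 slots, ring
 * index 260 maps to array slot 4.
 */
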
static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}

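/*
 * A rough reading of the test above: the queue is only considered
 * writable while a worst-case skb would still fit after everything in
 * flight, i.e. MAX_SKB_FRAGS fragment slots plus a small reserve (the
 * "+ 2") for a header crossing a page and a GSO extra-info segment.
 * That reading of the reserve is our annotation, not a documented
 * invariant.
 */
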
static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align the IP header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));

			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			break;
		}

		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			np->grant_tx_page[id] = NULL;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		np->grant_tx_page[id] = virt_to_page(data);
		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		struct page *page = skb_frag_page(frag);

		len = skb_frag_size(frag);
		offset = frag->page_offset;

		/* Data must not cross a page boundary. */
		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));

		/* Skip unused frames from start of page */
		page += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		while (len > 0) {
			unsigned long bytes;

			BUG_ON(offset >= PAGE_SIZE);

			bytes = PAGE_SIZE - offset;
			if (bytes > len)
				bytes = len;

			tx->flags |= XEN_NETTXF_more_data;

			id = get_id_from_freelist(&np->tx_skb_freelist,
						  np->tx_skbs);
			np->tx_skbs[id].skb = skb_get(skb);
			tx = RING_GET_REQUEST(&np->tx, prod++);
			tx->id = id;
			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
			BUG_ON((signed short)ref < 0);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							mfn, GNTMAP_readonly);

			np->grant_tx_page[id] = page;
			tx->gref = np->grant_tx_ref[id] = ref;
			tx->offset = offset;
			tx->size = bytes;
			tx->flags = 0;

			offset += bytes;
			len -= bytes;

			/* Next frame */
			if (offset == PAGE_SIZE && len) {
				BUG_ON(!PageCompound(page));
				page++;
				offset = 0;
			}
		}
	}

	np->tx.req_prod_pvt = prod;
}

/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}

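/*
 * Worked example, assuming 4 KiB pages: a frag of 2048 bytes starting
 * at in-page offset 3072 needs PFN_UP(3072 + 2048) = PFN_UP(5120) = 2
 * slots, because it straddles a page boundary even though it carries
 * only half a page of data.
 */
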
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_alert_ratelimited(
			"xennet: skb rides the rocket: %d slots\n", slots);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	np->grant_tx_page[id] = virt_to_page(data);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->tx_irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)

{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
		skb = xennet_get_rx_skb(np, cons + slots);
		ref = xennet_get_rx_ref(np, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);
		skb_reset_network_header(skb);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&np->napi, skb);
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(np, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_gro_flush(napi, false);

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ?
		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

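/*
 * Example of the ceiling above: XEN_NETIF_MAX_TX_SIZE is the 64 KiB - 1
 * wire-format limit, so with SG enabled the MTU may approach
 * 65535 - MAX_TCP_HEADER, while without SG it is capped at the
 * conventional 1500-byte ETH_DATA_LEN. (Exact numbers depend on the
 * kernel configuration.)
 */
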
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		get_page(np->grant_tx_page[i]);
		gnttab_end_foreign_access(np->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(np->grant_tx_page[i]));
		np->grant_tx_page[i] = NULL;
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	int id, ref;

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = np->rx_skbs[id];
		if (!skb)
			continue;

		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_IPV6_CSUM) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-ipv6-csum-offload", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_IPV6_CSUM;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_TSO6) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv6", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO6;
	}

	return features;
}

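/*
 * Each offload bit above is gated on a "feature-*" node that the
 * backend publishes in its xenstore directory, for example (layout
 * illustrative):
 *
 *	.../backend/vif/<domid>/<handle>/feature-sg = "1"
 *
 * A missing or zero node clears the corresponding NETIF_F_* bit.
 */
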
static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);
	xennet_tx_buf_gc(dev);
	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
		napi_schedule(&np->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}

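/*
 * xennet_interrupt() above is only wired up when tx and rx share one
 * event channel (see setup_netfront_single() below); with split event
 * channels, setup_netfront_split() binds xennet_tx_interrupt() and
 * xennet_rx_interrupt() to their own channels instead.
 */
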
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	xennet_interrupt(0, dev);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	err = -ENOMEM;
	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		np->grant_tx_page[i] = NULL;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit_free_stats;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops = &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit_free_stats:
	free_percpu(np->stats);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->tx_irq && (info->tx_irq == info->rx_irq))
		unbind_from_irqhandler(info->tx_irq, info);
	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
		unbind_from_irqhandler(info->tx_irq, info);
		unbind_from_irqhandler(info->rx_irq, info);
	}
	info->tx_evtchn = info->rx_evtchn = 0;
	info->tx_irq = info->rx_irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

1433/**
1434 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1435 * driver restart. We tear down our netif structure and recreate it, but
1436 * leave the device-layer structures intact so that this is transparent to the
1437 * rest of the kernel.
1438 */
1439static int netfront_resume(struct xenbus_device *dev)
1440{
Greg Kroah-Hartman1b713e02009-05-04 12:40:54 -07001441 struct netfront_info *info = dev_get_drvdata(&dev->dev);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001442
1443 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1444
1445 xennet_disconnect_backend(info);
1446 return 0;
1447}
1448
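/*
 * Read this device's "mac" node from xenstore.  The value is a string
 * of the form "xx:xx:xx:xx:xx:xx" (e.g. "00:16:3e:01:02:03", 00:16:3e
 * being the OUI commonly used for Xen guests), parsed here one
 * colon-separated hex octet at a time.
 */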
1449static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1450{
1451 char *s, *e, *macstr;
1452 int i;
1453
1454 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1455 if (IS_ERR(macstr))
1456 return PTR_ERR(macstr);
1457
1458 for (i = 0; i < ETH_ALEN; i++) {
1459 mac[i] = simple_strtoul(s, &e, 16);
1460 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1461 kfree(macstr);
1462 return -ENOENT;
1463 }
1464 s = e+1;
1465 }
1466
1467 kfree(macstr);
1468 return 0;
1469}
1470
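/*
 * Single event-channel mode: one channel is shared by TX and RX, and
 * xennet_interrupt() (defined earlier in this file) services both
 * rings from the same handler.
 */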
Wei Liud634bf22013-05-22 06:34:46 +00001471static int setup_netfront_single(struct netfront_info *info)
1472{
1473 int err;
1474
1475 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1476 if (err < 0)
1477 goto fail;
1478
1479 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1480 xennet_interrupt,
1481 0, info->netdev->name, info);
1482 if (err < 0)
1483 goto bind_fail;
1484 info->rx_evtchn = info->tx_evtchn;
1485 info->rx_irq = info->tx_irq = err;
1486
1487 return 0;
1488
1489bind_fail:
1490 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1491 info->tx_evtchn = 0;
1492fail:
1493 return err;
1494}
1495
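/*
 * Split event-channel mode: TX and RX each get their own event channel
 * and irq, named "<netdev>-tx" and "<netdev>-rx", so that the two rings
 * can be serviced (and their interrupts affinitized) independently.
 */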
1496static int setup_netfront_split(struct netfront_info *info)
1497{
1498 int err;
1499
1500 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1501 if (err < 0)
1502 goto fail;
1503 err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1504 if (err < 0)
1505 goto alloc_rx_evtchn_fail;
1506
1507 snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1508 "%s-tx", info->netdev->name);
1509 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1510 xennet_tx_interrupt,
1511 0, info->tx_irq_name, info);
1512 if (err < 0)
1513 goto bind_tx_fail;
1514 info->tx_irq = err;
1515
1516 snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1517 "%s-rx", info->netdev->name);
1518 err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1519 xennet_rx_interrupt,
1520 0, info->rx_irq_name, info);
1521 if (err < 0)
1522 goto bind_rx_fail;
1523 info->rx_irq = err;
1524
1525 return 0;
1526
1527bind_rx_fail:
1528 unbind_from_irqhandler(info->tx_irq, info);
1529 info->tx_irq = 0;
1530bind_tx_fail:
1531 xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1532 info->rx_evtchn = 0;
1533alloc_rx_evtchn_fail:
1534 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1535 info->tx_evtchn = 0;
1536fail:
1537 return err;
1538}
1539
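/*
 * Allocate the shared TX and RX ring pages, grant the backend access
 * to them, and bind the event channel(s).  Split event channels are
 * attempted only if the backend advertises
 * feature-split-event-channels.
 */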
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001540static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1541{
1542 struct xen_netif_tx_sring *txs;
1543 struct xen_netif_rx_sring *rxs;
1544 int err;
1545 struct net_device *netdev = info->netdev;
Wei Liud634bf22013-05-22 06:34:46 +00001546 unsigned int feature_split_evtchn;
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001547
1548 info->tx_ring_ref = GRANT_INVALID_REF;
1549 info->rx_ring_ref = GRANT_INVALID_REF;
1550 info->rx.sring = NULL;
1551 info->tx.sring = NULL;
1552 netdev->irq = 0;
1553
Wei Liud634bf22013-05-22 06:34:46 +00001554 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1555 "feature-split-event-channels", "%u",
1556 &feature_split_evtchn);
1557 if (err < 0)
1558 feature_split_evtchn = 0;
1559
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001560 err = xen_net_read_mac(dev, netdev->dev_addr);
1561 if (err) {
1562 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1563 goto fail;
1564 }
1565
Ian Campbella144ff02008-06-17 10:47:08 +02001566 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001567 if (!txs) {
1568 err = -ENOMEM;
1569 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1570 goto fail;
1571 }
1572 SHARED_RING_INIT(txs);
1573 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1574
1575 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
Wei Liu1ca29832013-05-20 01:05:12 +00001576 if (err < 0)
1577 goto grant_tx_ring_fail;
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001578
1579 info->tx_ring_ref = err;
Ian Campbella144ff02008-06-17 10:47:08 +02001580 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001581 if (!rxs) {
1582 err = -ENOMEM;
1583 xenbus_dev_fatal(dev, err, "allocating rx ring page");
Wei Liu1ca29832013-05-20 01:05:12 +00001584 goto alloc_rx_ring_fail;
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001585 }
1586 SHARED_RING_INIT(rxs);
1587 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1588
1589 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
Wei Liu1ca29832013-05-20 01:05:12 +00001590 if (err < 0)
1591 goto grant_rx_ring_fail;
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001592 info->rx_ring_ref = err;
1593
Wei Liud634bf22013-05-22 06:34:46 +00001594 if (feature_split_evtchn)
1595 err = setup_netfront_split(info);
 1596	/* Set up a single event channel if
 1597	 * a) feature-split-event-channels == 0, or
 1598	 * b) feature-split-event-channels == 1 but the split setup failed.
 1599	 */
 1600	if (!feature_split_evtchn || err)
1601 err = setup_netfront_single(info);
1602
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001603 if (err)
Wei Liu1ca29832013-05-20 01:05:12 +00001604 goto alloc_evtchn_fail;
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001605
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001606 return 0;
1607
Wei Liu1ca29832013-05-20 01:05:12 +00001608	/* If we fail to set up netfront, it is safe to just revoke access to
 1609	 * the granted pages because the backend is not accessing them yet.
 1610	 */
Wei Liu1ca29832013-05-20 01:05:12 +00001611alloc_evtchn_fail:
1612 gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1613grant_rx_ring_fail:
1614 free_page((unsigned long)rxs);
1615alloc_rx_ring_fail:
1616 gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1617grant_tx_ring_fail:
1618 free_page((unsigned long)txs);
1619fail:
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001620 return err;
1621}
1622
1623/* Common code used when first setting up, and when resuming. */
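/*
 * The ring references, event channel(s) and feature flags are all
 * published in a single xenstore transaction; if that transaction
 * races with another writer, xenbus_transaction_end() returns -EAGAIN
 * and the whole set of writes is retried from the "again" label.
 */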
Ian Campbellf502bf22010-08-18 23:27:49 +00001624static int talk_to_netback(struct xenbus_device *dev,
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001625 struct netfront_info *info)
1626{
1627 const char *message;
1628 struct xenbus_transaction xbt;
1629 int err;
1630
1631 /* Create shared ring, alloc event channel. */
1632 err = setup_netfront(dev, info);
1633 if (err)
1634 goto out;
1635
1636again:
1637 err = xenbus_transaction_start(&xbt);
1638 if (err) {
1639 xenbus_dev_fatal(dev, err, "starting transaction");
1640 goto destroy_ring;
1641 }
1642
1643 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1644 info->tx_ring_ref);
1645 if (err) {
1646 message = "writing tx ring-ref";
1647 goto abort_transaction;
1648 }
1649 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1650 info->rx_ring_ref);
1651 if (err) {
1652 message = "writing rx ring-ref";
1653 goto abort_transaction;
1654 }
Wei Liud634bf22013-05-22 06:34:46 +00001655
1656 if (info->tx_evtchn == info->rx_evtchn) {
1657 err = xenbus_printf(xbt, dev->nodename,
1658 "event-channel", "%u", info->tx_evtchn);
1659 if (err) {
1660 message = "writing event-channel";
1661 goto abort_transaction;
1662 }
1663 } else {
1664 err = xenbus_printf(xbt, dev->nodename,
1665 "event-channel-tx", "%u", info->tx_evtchn);
1666 if (err) {
1667 message = "writing event-channel-tx";
1668 goto abort_transaction;
1669 }
1670 err = xenbus_printf(xbt, dev->nodename,
1671 "event-channel-rx", "%u", info->rx_evtchn);
1672 if (err) {
1673 message = "writing event-channel-rx";
1674 goto abort_transaction;
1675 }
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001676 }
1677
1678 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1679 1);
1680 if (err) {
1681 message = "writing request-rx-copy";
1682 goto abort_transaction;
1683 }
1684
1685 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1686 if (err) {
1687 message = "writing feature-rx-notify";
1688 goto abort_transaction;
1689 }
1690
1691 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1692 if (err) {
1693 message = "writing feature-sg";
1694 goto abort_transaction;
1695 }
1696
1697 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1698 if (err) {
1699 message = "writing feature-gso-tcpv4";
1700 goto abort_transaction;
1701 }
1702
Paul Durrant2c0057d2014-01-15 17:30:33 +00001703 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1704 if (err) {
1705 message = "writing feature-gso-tcpv6";
1706 goto abort_transaction;
1707 }
1708
1709 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1710 "1");
1711 if (err) {
1712 message = "writing feature-ipv6-csum-offload";
1713 goto abort_transaction;
1714 }
1715
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001716 err = xenbus_transaction_end(xbt, 0);
1717 if (err) {
1718 if (err == -EAGAIN)
1719 goto again;
1720 xenbus_dev_fatal(dev, err, "completing transaction");
1721 goto destroy_ring;
1722 }
1723
1724 return 0;
1725
1726 abort_transaction:
1727 xenbus_transaction_end(xbt, 1);
1728 xenbus_dev_fatal(dev, err, "%s", message);
1729 destroy_ring:
1730 xennet_disconnect_backend(info);
1731 out:
1732 return err;
1733}
1734
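/*
 * (Re)connect to the backend: check that it supports the mandatory
 * rx-copy receive mode, renegotiate offload features, then rebuild the
 * TX and RX ring state before turning the carrier back on and kicking
 * the backend.
 */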
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001735static int xennet_connect(struct net_device *dev)
1736{
1737 struct netfront_info *np = netdev_priv(dev);
1738 int i, requeue_idx, err;
1739 struct sk_buff *skb;
1740 grant_ref_t ref;
1741 struct xen_netif_rx_request *req;
1742 unsigned int feature_rx_copy;
1743
1744 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1745 "feature-rx-copy", "%u", &feature_rx_copy);
1746 if (err != 1)
1747 feature_rx_copy = 0;
1748
1749 if (!feature_rx_copy) {
1750 dev_info(&dev->dev,
Joe Perches898eb712007-10-18 03:06:30 -07001751			 "backend does not support the rx-copy receive path\n");
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001752 return -ENODEV;
1753 }
1754
Ian Campbellf502bf22010-08-18 23:27:49 +00001755 err = talk_to_netback(np->xbdev, np);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001756 if (err)
1757 return err;
1758
Ian Campbell1ba37c52011-05-24 21:56:02 +00001759 rtnl_lock();
Michał Mirosławfb507932011-03-31 01:01:35 +00001760 netdev_update_features(dev);
Ian Campbell1ba37c52011-05-24 21:56:02 +00001761 rtnl_unlock();
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001762
1763 spin_lock_bh(&np->rx_lock);
1764 spin_lock_irq(&np->tx_lock);
1765
1766 /* Step 1: Discard all pending TX packet fragments. */
1767 xennet_release_tx_bufs(np);
1768
1769 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1770 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
Ian Campbell01c68022011-10-05 00:28:47 +00001771 skb_frag_t *frag;
1772 const struct page *page;
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001773 if (!np->rx_skbs[i])
1774 continue;
1775
1776 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1777 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1778 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1779
Ian Campbell01c68022011-10-05 00:28:47 +00001780 frag = &skb_shinfo(skb)->frags[0];
1781 page = skb_frag_page(frag);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001782 gnttab_grant_foreign_access_ref(
1783 ref, np->xbdev->otherend_id,
Ian Campbell01c68022011-10-05 00:28:47 +00001784 pfn_to_mfn(page_to_pfn(page)),
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001785 0);
1786 req->gref = ref;
1787 req->id = requeue_idx;
1788
1789 requeue_idx++;
1790 }
1791
1792 np->rx.req_prod_pvt = requeue_idx;
1793
1794 /*
1795 * Step 3: All public and private state should now be sane. Get
1796 * ready to start sending and receiving packets and give the driver
1797 * domain a kick because we've probably just requeued some
1798 * packets.
1799 */
1800 netif_carrier_on(np->netdev);
Wei Liud634bf22013-05-22 06:34:46 +00001801 notify_remote_via_irq(np->tx_irq);
1802 if (np->tx_irq != np->rx_irq)
1803 notify_remote_via_irq(np->rx_irq);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001804 xennet_tx_buf_gc(dev);
1805 xennet_alloc_rx_buffers(dev);
1806
1807 spin_unlock_irq(&np->tx_lock);
1808 spin_unlock_bh(&np->rx_lock);
1809
1810 return 0;
1811}
1812
 1813/*
1814 * Callback received when the backend's state changes.
1815 */
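/*
 * This drives the frontend's side of the xenbus state machine: a
 * backend entering InitWait triggers (re)connection, Closing/Closed
 * lead to an orderly frontend close, and a Connected backend prompts
 * netdev_notify_peers() so peers relearn our address.
 */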
Ian Campbellf502bf22010-08-18 23:27:49 +00001816static void netback_changed(struct xenbus_device *dev,
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001817 enum xenbus_state backend_state)
1818{
Greg Kroah-Hartman1b713e02009-05-04 12:40:54 -07001819 struct netfront_info *np = dev_get_drvdata(&dev->dev);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001820 struct net_device *netdev = np->netdev;
1821
1822 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1823
1824 switch (backend_state) {
1825 case XenbusStateInitialising:
1826 case XenbusStateInitialised:
Noboru Iwamatsub78c9512009-10-13 17:22:29 -04001827 case XenbusStateReconfiguring:
1828 case XenbusStateReconfigured:
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001829 case XenbusStateUnknown:
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001830 break;
1831
1832 case XenbusStateInitWait:
1833 if (dev->state != XenbusStateInitialising)
1834 break;
1835 if (xennet_connect(netdev) != 0)
1836 break;
1837 xenbus_switch_state(dev, XenbusStateConnected);
Laszlo Ersek08e34eb2011-12-11 01:48:59 +00001838 break;
1839
1840 case XenbusStateConnected:
Amerigo Wangee89bab2012-08-09 22:14:56 +00001841 netdev_notify_peers(netdev);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001842 break;
1843
David Vrabelbce3ea82014-02-04 18:50:26 +00001844 case XenbusStateClosed:
1845 if (dev->state == XenbusStateClosed)
1846 break;
1847 /* Missed the backend's CLOSING state -- fallthrough */
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001848 case XenbusStateClosing:
1849 xenbus_frontend_closed(dev);
1850 break;
1851 }
1852}
1853
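/*
 * Extra per-device statistics exposed via "ethtool -S".  Each entry
 * names a counter and its offset within struct netfront_info;
 * rx_gso_checksum_fixup is incremented (elsewhere in this file) when
 * the frontend had to set up the checksum of a received packet itself.
 */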
Ian Campbelle0ce4af2011-01-27 04:14:03 +00001854static const struct xennet_stat {
1855 char name[ETH_GSTRING_LEN];
1856 u16 offset;
1857} xennet_stats[] = {
1858 {
1859 "rx_gso_checksum_fixup",
1860 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1861 },
1862};
1863
1864static int xennet_get_sset_count(struct net_device *dev, int string_set)
1865{
1866 switch (string_set) {
1867 case ETH_SS_STATS:
1868 return ARRAY_SIZE(xennet_stats);
1869 default:
1870 return -EINVAL;
1871 }
1872}
1873
1874static void xennet_get_ethtool_stats(struct net_device *dev,
 1875				     struct ethtool_stats *stats, u64 *data)
1876{
1877 void *np = netdev_priv(dev);
1878 int i;
1879
1880 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
Eric Dumazet48f26d52011-03-14 21:05:40 -07001881 data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
Ian Campbelle0ce4af2011-01-27 04:14:03 +00001882}
1883
 1884static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1885{
1886 int i;
1887
1888 switch (stringset) {
1889 case ETH_SS_STATS:
1890 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1891 memcpy(data + i * ETH_GSTRING_LEN,
1892 xennet_stats[i].name, ETH_GSTRING_LEN);
1893 break;
1894 }
1895}
1896
Stephen Hemminger0fc0b732009-09-02 01:03:33 -07001897static const struct ethtool_ops xennet_ethtool_ops =
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001898{
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001899 .get_link = ethtool_op_get_link,
Ian Campbelle0ce4af2011-01-27 04:14:03 +00001900
1901 .get_sset_count = xennet_get_sset_count,
1902 .get_ethtool_stats = xennet_get_ethtool_stats,
1903 .get_strings = xennet_get_strings,
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07001904};
1905
1906#ifdef CONFIG_SYSFS
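/*
 * Per-netdevice sysfs attributes: rxbuf_min and rxbuf_max bound the RX
 * buffer refill target (writable with CAP_NET_ADMIN, clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET]), while rxbuf_cur reports the current
 * target read-only.
 */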
1907static ssize_t show_rxbuf_min(struct device *dev,
1908 struct device_attribute *attr, char *buf)
1909{
1910 struct net_device *netdev = to_net_dev(dev);
1911 struct netfront_info *info = netdev_priv(netdev);
1912
1913 return sprintf(buf, "%u\n", info->rx_min_target);
1914}
1915
1916static ssize_t store_rxbuf_min(struct device *dev,
1917 struct device_attribute *attr,
1918 const char *buf, size_t len)
1919{
1920 struct net_device *netdev = to_net_dev(dev);
1921 struct netfront_info *np = netdev_priv(netdev);
1922 char *endp;
1923 unsigned long target;
1924
1925 if (!capable(CAP_NET_ADMIN))
1926 return -EPERM;
1927
1928 target = simple_strtoul(buf, &endp, 0);
1929 if (endp == buf)
1930 return -EBADMSG;
1931
1932 if (target < RX_MIN_TARGET)
1933 target = RX_MIN_TARGET;
1934 if (target > RX_MAX_TARGET)
1935 target = RX_MAX_TARGET;
1936
1937 spin_lock_bh(&np->rx_lock);
1938 if (target > np->rx_max_target)
1939 np->rx_max_target = target;
1940 np->rx_min_target = target;
1941 if (target > np->rx_target)
1942 np->rx_target = target;
1943
1944 xennet_alloc_rx_buffers(netdev);
1945
1946 spin_unlock_bh(&np->rx_lock);
1947 return len;
1948}
1949
1950static ssize_t show_rxbuf_max(struct device *dev,
1951 struct device_attribute *attr, char *buf)
1952{
1953 struct net_device *netdev = to_net_dev(dev);
1954 struct netfront_info *info = netdev_priv(netdev);
1955
1956 return sprintf(buf, "%u\n", info->rx_max_target);
1957}
1958
1959static ssize_t store_rxbuf_max(struct device *dev,
1960 struct device_attribute *attr,
1961 const char *buf, size_t len)
1962{
1963 struct net_device *netdev = to_net_dev(dev);
1964 struct netfront_info *np = netdev_priv(netdev);
1965 char *endp;
1966 unsigned long target;
1967
1968 if (!capable(CAP_NET_ADMIN))
1969 return -EPERM;
1970
1971 target = simple_strtoul(buf, &endp, 0);
1972 if (endp == buf)
1973 return -EBADMSG;
1974
1975 if (target < RX_MIN_TARGET)
1976 target = RX_MIN_TARGET;
1977 if (target > RX_MAX_TARGET)
1978 target = RX_MAX_TARGET;
1979
1980 spin_lock_bh(&np->rx_lock);
1981 if (target < np->rx_min_target)
1982 np->rx_min_target = target;
1983 np->rx_max_target = target;
1984 if (target < np->rx_target)
1985 np->rx_target = target;
1986
1987 xennet_alloc_rx_buffers(netdev);
1988
1989 spin_unlock_bh(&np->rx_lock);
1990 return len;
1991}
1992
1993static ssize_t show_rxbuf_cur(struct device *dev,
1994 struct device_attribute *attr, char *buf)
1995{
1996 struct net_device *netdev = to_net_dev(dev);
1997 struct netfront_info *info = netdev_priv(netdev);
1998
1999 return sprintf(buf, "%u\n", info->rx_target);
2000}
2001
2002static struct device_attribute xennet_attrs[] = {
2003 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2004 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2005 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2006};
2007
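/*
 * Create the attribute files on the net device; on partial failure,
 * remove the files already created before propagating the error.
 */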
2008static int xennet_sysfs_addif(struct net_device *netdev)
2009{
2010 int i;
2011 int err;
2012
2013 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2014 err = device_create_file(&netdev->dev,
2015 &xennet_attrs[i]);
2016 if (err)
2017 goto fail;
2018 }
2019 return 0;
2020
2021 fail:
2022 while (--i >= 0)
2023 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2024 return err;
2025}
2026
2027static void xennet_sysfs_delif(struct net_device *netdev)
2028{
2029 int i;
2030
2031 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2032 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2033}
2034
2035#endif /* CONFIG_SYSFS */
2036
Jan Beulich73db1442011-12-22 09:08:13 +00002037static const struct xenbus_device_id netfront_ids[] = {
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002038 { "vif" },
2039 { "" }
2040};
2041
2042
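/*
 * Device teardown: disconnect from the backend (unbinding irqs and
 * revoking ring grants) and remove the sysfs nodes before
 * unregistering the netdev; the RX refill timer is deleted only after
 * unregistration, when it can no longer be re-armed.
 */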
Bill Pemberton8e0e46b2012-12-03 09:24:22 -05002043static int xennet_remove(struct xenbus_device *dev)
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002044{
Greg Kroah-Hartman1b713e02009-05-04 12:40:54 -07002045 struct netfront_info *info = dev_get_drvdata(&dev->dev);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002046
2047 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2048
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002049 xennet_disconnect_backend(info);
2050
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002051 xennet_sysfs_delif(info->netdev);
2052
Ian Campbell6bc96d02012-06-25 22:48:41 +00002053 unregister_netdev(info->netdev);
2054
2055 del_timer_sync(&info->rx_refill_timer);
2056
stephen hemmingere00f85b2011-06-21 05:35:31 +00002057 free_percpu(info->stats);
2058
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002059 free_netdev(info->netdev);
2060
2061 return 0;
2062}
2063
Jan Beulich73db1442011-12-22 09:08:13 +00002064static DEFINE_XENBUS_DRIVER(netfront, ,
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002065 .probe = netfront_probe,
Bill Pemberton8e0e46b2012-12-03 09:24:22 -05002066 .remove = xennet_remove,
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002067 .resume = netfront_resume,
Ian Campbellf502bf22010-08-18 23:27:49 +00002068 .otherend_changed = netback_changed,
Jan Beulich73db1442011-12-22 09:08:13 +00002069);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002070
2071static int __init netif_init(void)
2072{
Jeremy Fitzhardinge6e833582008-08-19 13:16:17 -07002073 if (!xen_domain())
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002074 return -ENODEV;
2075
Konrad Rzeszutek Wilk51c71a32013-11-26 15:05:40 -05002076 if (!xen_has_pv_nic_devices())
Igor Mammedovb9136d22012-03-21 15:08:38 +01002077 return -ENODEV;
2078
Joe Perches383eda32013-06-27 21:57:49 -07002079 pr_info("Initialising Xen virtual ethernet driver\n");
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002080
Al Viroffb78a22008-11-22 17:38:14 +00002081 return xenbus_register_frontend(&netfront_driver);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002082}
2083module_init(netif_init);
2084
2085
2086static void __exit netif_exit(void)
2087{
Al Viroffb78a22008-11-22 17:38:14 +00002088 xenbus_unregister_driver(&netfront_driver);
Jeremy Fitzhardinge0d160212007-07-17 18:37:06 -07002089}
2090module_exit(netif_exit);
2091
2092MODULE_DESCRIPTION("Xen virtual network device frontend");
2093MODULE_LICENSE("GPL");
Mark McLoughlind2f0c522008-04-02 10:54:05 -07002094MODULE_ALIAS("xen:vif");
Mark McLoughlin4f93f09b2008-04-02 10:54:06 -07002095MODULE_ALIAS("xennet");