/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <asm/xen/page.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)

struct netfront_stats {
	u64			rx_packets;
	u64			tx_packets;
	u64			rx_bytes;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */

	struct xenbus_device *xbdev;

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];

	/* Statistics */
	struct netfront_stats __percpu *stats;

	unsigned long rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}
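
/*
 * Example: starting from tx_skb_freelist == 3, add_id_to_freelist(&head,
 * tx_skbs, 5) stores the old head (3) in tx_skbs[5].link and makes 5 the
 * new head; get_id_from_freelist() then hands out 5 and restores 3. Free
 * ids are small integers below PAGE_OFFSET, while in-use entries hold skb
 * pointers at or above PAGE_OFFSET, which is exactly the property
 * skb_entry_is_link() tests above.
 */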

/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
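/* NET_RX_RING_SIZE is a power of two, so the mask above is a cheap modulo. */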

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}
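
/*
 * Refill the receive ring: allocate an skb plus one page per slot, grant
 * each page to the backend, then publish the new requests in one batch.
 * When the page allocator runs dry the function backs off and retries via
 * rx_refill_timer roughly 100 ms (HZ/10 jiffies) later.
 */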
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align ip header to a 16 bytes boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		__skb_fill_page_desc(skb, 0, page, 0, 0);
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
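		/*
		 * Example: if rsp_cons has caught up with prod and four more
		 * requests are already queued (req_prod == prod + 4), the
		 * statement below re-arms the event at prod + 3, roughly
		 * halfway through the outstanding requests.
		 */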
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		struct page *page = skb_frag_page(frag);

		len = skb_frag_size(frag);
		offset = frag->page_offset;

		/* Data must not cross a page boundary. */
		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));

		/* Skip unused frames from start of page */
		page += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		while (len > 0) {
			unsigned long bytes;

			BUG_ON(offset >= PAGE_SIZE);

			bytes = PAGE_SIZE - offset;
			if (bytes > len)
				bytes = len;

			tx->flags |= XEN_NETTXF_more_data;

			id = get_id_from_freelist(&np->tx_skb_freelist,
						  np->tx_skbs);
			np->tx_skbs[id].skb = skb_get(skb);
			tx = RING_GET_REQUEST(&np->tx, prod++);
			tx->id = id;
			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
			BUG_ON((signed short)ref < 0);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							mfn, GNTMAP_readonly);

			tx->gref = np->grant_tx_ref[id] = ref;
			tx->offset = offset;
			tx->size = bytes;
			tx->flags = 0;

			offset += bytes;
			len -= bytes;

			/* Next frame */
			if (offset == PAGE_SIZE && len) {
				BUG_ON(!PageCompound(page));
				page++;
				offset = 0;
			}
		}
	}

	np->tx.req_prod_pvt = prod;
}

/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}

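/*
 * xennet_start_xmit() below combines this frag count with the linear
 * header cost, DIV_ROUND_UP(offset + len, PAGE_SIZE). Example with 4 KiB
 * pages: a 6000-byte frag starting 3000 bytes into its page needs
 * PFN_UP(3000 + 6000) = 3 slots, since the data spans three page frames.
 */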
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int slots;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
		xennet_count_skb_frag_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_alert_ratelimited(
			"xennet: skb rides the rocket: %d slots\n", slots);
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id   = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;
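	/* In the ring's wire format the first slot's size field carries the
	 * total packet length, so rewrite it now that xennet_make_frags()
	 * has emitted the per-slot sizes. */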

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->tx_irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

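/*
 * Re-post an already-granted receive page at the tail of the rx ring under
 * a fresh request id; used when a response cannot be consumed and its
 * buffer has to be recycled.
 */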
static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

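/*
 * Consume the chain of responses that makes up one packet: the head slot
 * plus any XEN_NETRXF_more_data continuation slots. Buffers whose grants
 * end cleanly are queued on @list for xennet_fill_frags(); bad slots are
 * recycled via xennet_move_rx_slot().
 */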
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
		skb = xennet_get_rx_skb(np, cons + slots);
		ref = xennet_get_rx_ref(np, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

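/*
 * Attach the pages collected by xennet_get_responses() as paged frags of
 * the head skb, consuming one rx response per page, and return the ring
 * index at which the caller should resume.
 */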
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		__skb_fill_page_desc(skb, nr_frags,
				     skb_frag_page(nfrag),
				     rx->offset, rx->status);

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}

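/*
 * NAPI poll handler: drain up to @budget packets from the rx ring,
 * reassembling each from its response chain, then refill the ring and
 * complete NAPI only when no unconsumed responses remain.
 */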
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize is the actual allocation size, even if the
		 * allocation is only partially used.
		 */
		skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
		skb->len += skb->data_len;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ?
		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

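/*
 * Sum the per-cpu counters. The u64_stats fetch/retry loop rereads each
 * cpu's snapshot until it observes a consistent pair of values, keeping
 * the 64-bit counters coherent even on 32-bit kernels.
 */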
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);

			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}

static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			const struct page *page =
				skb_frag_page(&skb_shinfo(skb)->frags[0]);
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	return features;
}

static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);
	xennet_tx_buf_gc(dev);
	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_info *np = dev_id;
	struct net_device *dev = np->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
		napi_schedule(&np->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	xennet_interrupt(0, dev);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	err = -ENOMEM;
	np->stats = alloc_percpu(struct netfront_stats);
	if (np->stats == NULL)
		goto exit;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit_free_stats;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops	= &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit_free_stats:
	free_percpu(np->stats);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->tx_irq && (info->tx_irq == info->rx_irq))
		unbind_from_irqhandler(info->tx_irq, info);
	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
		unbind_from_irqhandler(info->tx_irq, info);
		unbind_from_irqhandler(info->rx_irq, info);
	}
	info->tx_evtchn = info->rx_evtchn = 0;
	info->tx_irq = info->rx_irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

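/*
 * Parse the "mac" node the toolstack publishes in xenstore: six
 * colon-separated hex octets, e.g. "00:16:3e:12:34:56".
 */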
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

Wei Liud634bf22013-05-22 06:34:46 +00001531static int setup_netfront_single(struct netfront_info *info)
1532{
1533 int err;
1534
1535 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1536 if (err < 0)
1537 goto fail;
1538
1539 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1540 xennet_interrupt,
1541 0, info->netdev->name, info);
1542 if (err < 0)
1543 goto bind_fail;
1544 info->rx_evtchn = info->tx_evtchn;
1545 info->rx_irq = info->tx_irq = err;
1546
1547 return 0;
1548
1549bind_fail:
1550 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1551 info->tx_evtchn = 0;
1552fail:
1553 return err;
1554}
1555
static int setup_netfront_split(struct netfront_info *info)
{
	int err;

	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
		 "%s-tx", info->netdev->name);
	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
					xennet_tx_interrupt,
					0, info->tx_irq_name, info);
	if (err < 0)
		goto bind_tx_fail;
	info->tx_irq = err;

	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
		 "%s-rx", info->netdev->name);
	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
					xennet_rx_interrupt,
					0, info->rx_irq_name, info);
	if (err < 0)
		goto bind_rx_fail;
	info->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(info->tx_irq, info);
	info->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
	info->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
	info->tx_evtchn = 0;
fail:
	return err;
}

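/*
 * Allocate and grant the shared TX/RX ring pages, then bind event
 * channels: split channels if the backend advertises
 * "feature-split-event-channels", falling back to a single shared channel
 * otherwise.  Failure here may safely revoke the grants, since the backend
 * has not yet mapped the ring pages.
 */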
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;
	unsigned int feature_split_evtchn;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-split-event-channels", "%u",
			   &feature_split_evtchn);
	if (err < 0)
		feature_split_evtchn = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0)
		goto grant_tx_ring_fail;

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0)
		goto grant_rx_ring_fail;
	info->rx_ring_ref = err;

	if (feature_split_evtchn)
		err = setup_netfront_split(info);
	/* Set up a single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but setup failed
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(info);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
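/*
 * Everything the backend needs is published under the frontend's xenstore
 * directory in one transaction (restarted on -EAGAIN): the two ring grant
 * references, the event channel(s) -- "event-channel" when shared,
 * "event-channel-tx"/"event-channel-rx" when split -- plus the
 * request-rx-copy, feature-rx-notify, feature-sg and feature-gso-tcpv4
 * announcements.
 */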
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}

	if (info->tx_evtchn == info->rx_evtchn) {
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel", "%u", info->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel-tx", "%u", info->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto abort_transaction;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "event-channel-rx", "%u", info->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto abort_transaction;
		}
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}

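/*
 * (Re)establish the connection: bail out unless the backend supports the
 * rx-copy receive path, republish our state via talk_to_netback(), then
 * requeue any RX buffers that were outstanding before the disconnect and
 * kick the backend so it notices the refilled ring.
 */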
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		skb_frag_t *frag;
		const struct page *page;
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		frag = &skb_shinfo(skb)->frags[0];
		page = skb_frag_page(frag);
		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(page)),
			0);
		req->gref = ref;
		req->id = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->tx_irq);
	if (np->tx_irq != np->rx_irq)
		notify_remote_via_irq(np->rx_irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

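/*
 * Driver-private counters exposed through "ethtool -S <ifname>".  Each
 * entry records the counter's byte offset inside struct netfront_info, and
 * xennet_get_ethtool_stats() reads the value back through that offset.
 * rx_gso_checksum_fixup counts received GSO packets that arrived without a
 * partial checksum and had to be fixed up before delivery to the stack.
 */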
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};

#ifdef CONFIG_SYSFS
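/*
 * Tunables for the RX buffer target, published per interface under sysfs
 * (rxbuf_min and rxbuf_max are writable by CAP_NET_ADMIN, rxbuf_cur is
 * read-only).  For example, assuming the interface is named eth0:
 *
 *	cat /sys/class/net/eth0/rxbuf_cur
 *	echo 256 > /sys/class/net/eth0/rxbuf_min
 *
 * Writes are clamped to [RX_MIN_TARGET, RX_MAX_TARGET] and trigger an
 * immediate attempt to refill the RX ring.
 */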
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

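/* Match every xenbus device of type "vif": one per virtual NIC the toolstack creates. */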
static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	xennet_sysfs_delif(info->netdev);

	unregister_netdev(info->netdev);

	del_timer_sync(&info->rx_refill_timer);

	free_percpu(info->stats);

	free_netdev(info->netdev);

	return 0;
}

static DEFINE_XENBUS_DRIVER(netfront, ,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
);

static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);


static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");