/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_stats {
	struct u64_stats_sync syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16 byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

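/* Attach one page fragment to the skb and shrink *len by the amount consumed. */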
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

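/* Build an skb from a chain of pages: copy the virtio header and as much
 * data as fits into the linear area, then attach the remaining pages as
 * fragments.
 */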
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into an skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible.  This usually
	 * indicates a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		if (net_ratelimit())
			pr_debug("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

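/* Collect the rest of a merged receive: pull num_buffers - 1 more pages off
 * the receive queue and attach each one to the skb as a fragment.
 */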
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

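/* Handle one completed receive buffer: build the skb, update the per-cpu
 * statistics, apply checksum and GSO metadata from the virtio header, and
 * hand the packet to the network stack.
 */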
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

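/* Post a single skb-sized receive buffer: virtio header plus MAX_PACKET_LEN
 * of linear data.
 */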
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

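/* Post a chain of pages big enough for a maximum-sized "big packets" frame,
 * with the virtio header in its own sg entry.
 */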
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separate vi->rx_sg[0] for virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				    first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

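/* Post a single page; with mergeable buffers the host chains as many pages
 * as a packet needs.
 */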
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable it
	 * here.  We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
}

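/* Worker that retries filling the receive queue after an allocation failure. */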
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in,
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

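/* Reclaim skbs that the host has finished transmitting; returns the number
 * of descriptors freed so the caller can recompute queue capacity.
 */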
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

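/* Fill in the virtio header from the skb metadata and queue the header plus
 * packet data on the send virtqueue.  Returns the remaining capacity of the
 * queue or a negative errno.
 */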
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
				 0, skb);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (net_ratelimit()) {
			if (likely(capacity == -ENOMEM)) {
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
			} else {
				dev->stats.tx_fifo_errors++;
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
			}
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

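/* Aggregate the per-cpu counters into a device-wide rtnl_link_stats64. */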
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats __percpu *stats
			= per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	virtnet_napi_enable(vi);
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

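/* Re-read the link status from the device config space and bring the
 * carrier and transmit queue up or down to match.
 */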
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 924 | static void virtnet_update_status(struct virtnet_info *vi) |
| 925 | { |
| 926 | u16 v; |
| 927 | |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 928 | if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 929 | offsetof(struct virtio_net_config, status), |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 930 | &v) < 0) |
| 931 | return; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 932 | |
| 933 | /* Ignore unknown (future) status bits */ |
| 934 | v &= VIRTIO_NET_S_LINK_UP; |
| 935 | |
| 936 | if (vi->status == v) |
| 937 | return; |
| 938 | |
| 939 | vi->status = v; |
| 940 | |
| 941 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
| 942 | netif_carrier_on(vi->dev); |
| 943 | netif_wake_queue(vi->dev); |
| 944 | } else { |
| 945 | netif_carrier_off(vi->dev); |
| 946 | netif_stop_queue(vi->dev); |
| 947 | } |
| 948 | } |
| 949 | |
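/* Config-change interrupt: the only config field we track is link status. */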
| 950 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 951 | { |
| 952 | struct virtnet_info *vi = vdev->priv; |
| 953 | |
| 954 | virtnet_update_status(vi); |
| 955 | } |
| 956 | |
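/* Set up one virtio-net device: allocate the netdev, map the host's feature
 * bits onto netdev features, find the virtqueues, register the device and
 * prime the receive queue. */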
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 957 | static int virtnet_probe(struct virtio_device *vdev) |
| 958 | { |
| 959 | int err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 960 | struct net_device *dev; |
| 961 | struct virtnet_info *vi; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 962 | struct virtqueue *vqs[3]; |
| 963 | vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
| 964 | const char *names[] = { "input", "output", "control" }; |
| 965 | int nvqs; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 966 | |
| 967 | /* Allocate ourselves a network device with room for our info */ |
| 968 | dev = alloc_etherdev(sizeof(struct virtnet_info)); |
| 969 | if (!dev) |
| 970 | return -ENOMEM; |
| 971 | |
| 972 | /* Set up network device as normal. */ |
Jiri Pirko | 0178934 | 2011-08-16 06:29:00 +0000 | [diff] [blame] | 973 | dev->priv_flags |= IFF_UNICAST_FLT; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 974 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 975 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 976 | |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 977 | SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 978 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 979 | |
| 980 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 981 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 982 | /* This opens up the world of extra features. */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 983 | dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 984 | if (csum) |
| 985 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 986 | |
| 987 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
| 988 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 989 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 990 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 991 | /* Individual feature bits: what can host handle? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 992 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 993 | dev->hw_features |= NETIF_F_TSO; |
| 994 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 995 | dev->hw_features |= NETIF_F_TSO6; |
| 996 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 997 | dev->hw_features |= NETIF_F_TSO_ECN; |
| 998 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) |
| 999 | dev->hw_features |= NETIF_F_UFO; |
| 1000 | |
| 1001 | if (gso) |
| 1002 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
| 1003 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1004 | } |
| 1005 | |
| 1006 | /* Configuration may specify what MAC to use. Otherwise use a random one. */
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1007 | if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, |
Rusty Russell | a586d4f | 2008-02-04 23:49:56 -0500 | [diff] [blame] | 1008 | offsetof(struct virtio_net_config, mac), |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1009 | dev->dev_addr, dev->addr_len) < 0) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1010 | random_ether_addr(dev->dev_addr); |
| 1011 | |
| 1012 | /* Set up our device-specific information */ |
| 1013 | vi = netdev_priv(dev); |
Dor Laor | 6c0cd7c | 2007-12-16 15:19:43 +0200 | [diff] [blame] | 1014 | netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1015 | vi->dev = dev; |
| 1016 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 1017 | vdev->priv = vi; |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1018 | vi->pages = NULL; |
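/* Per-cpu transmit/receive counters, reported through ndo_get_stats64. */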
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1019 | vi->stats = alloc_percpu(struct virtnet_stats); |
| 1020 | err = -ENOMEM; |
| 1021 | if (vi->stats == NULL) |
| 1022 | goto free; |
| 1023 | |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1024 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
Michael S. Tsirkin | 5e01d2f | 2010-04-07 21:01:41 -0700 | [diff] [blame] | 1025 | sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); |
| 1026 | sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1027 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1028 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 1029 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 1030 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 1031 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1032 | vi->big_packets = true; |
| 1033 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1034 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 1035 | vi->mergeable_rx_bufs = true; |
| 1036 | |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1037 | /* We expect two virtqueues, receive then send, |
| 1038 | * and optionally control. */ |
| 1039 | nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1040 | |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1041 | err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); |
| 1042 | if (err) |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1043 | goto free_stats; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1044 | |
| 1045 | vi->rvq = vqs[0]; |
| 1046 | vi->svq = vqs[1]; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1047 | |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 1048 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1049 | vi->cvq = vqs[2]; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1050 | |
| 1051 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
| 1052 | dev->features |= NETIF_F_HW_VLAN_FILTER; |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 1053 | } |
| 1054 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1055 | err = register_netdev(dev); |
| 1056 | if (err) { |
| 1057 | pr_debug("virtio_net: registering device failed\n"); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1058 | goto free_vqs; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1059 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1060 | |
| 1061 | /* Last of all, set up some receive buffers. */ |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1062 | try_fill_recv(vi, GFP_KERNEL); |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1063 | |
| 1064 | /* If we didn't even get one input buffer, we're useless. */ |
| 1065 | if (vi->num == 0) { |
| 1066 | err = -ENOMEM; |
| 1067 | goto unregister; |
| 1068 | } |
| 1069 | |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 1070 | /* Assume link up if device can't report link status, |
| 1071 | * otherwise get link status from config. */
| 1072 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
| 1073 | netif_carrier_off(dev); |
| 1074 | virtnet_update_status(vi); |
| 1075 | } else { |
| 1076 | vi->status = VIRTIO_NET_S_LINK_UP; |
| 1077 | netif_carrier_on(dev); |
| 1078 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1079 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1080 | pr_debug("virtnet: registered device %s\n", dev->name); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1081 | return 0; |
| 1082 | |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1083 | unregister: |
| 1084 | unregister_netdev(dev); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1085 | cancel_delayed_work_sync(&vi->refill); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1086 | free_vqs: |
| 1087 | vdev->config->del_vqs(vdev); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1088 | free_stats: |
| 1089 | free_percpu(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1090 | free: |
| 1091 | free_netdev(dev); |
| 1092 | return err; |
| 1093 | } |
| 1094 | |
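/* Detach and free any buffers still sitting in the send and receive queues.
 * The receive buffers are pages rather than skbs when mergeable or big
 * packets are in use. */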
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1095 | static void free_unused_bufs(struct virtnet_info *vi) |
| 1096 | { |
| 1097 | void *buf; |
| 1098 | while (1) { |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 1099 | buf = virtqueue_detach_unused_buf(vi->svq); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 1100 | if (!buf) |
| 1101 | break; |
| 1102 | dev_kfree_skb(buf); |
| 1103 | } |
| 1104 | while (1) { |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 1105 | buf = virtqueue_detach_unused_buf(vi->rvq); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1106 | if (!buf) |
| 1107 | break; |
| 1108 | if (vi->mergeable_rx_bufs || vi->big_packets) |
| 1109 | give_pages(vi, buf); |
| 1110 | else |
| 1111 | dev_kfree_skb(buf); |
| 1112 | --vi->num; |
| 1113 | } |
| 1114 | BUG_ON(vi->num != 0); |
| 1115 | } |
| 1116 | |
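/* Undo virtnet_probe: quiesce the device, then release the buffers,
 * virtqueues and netdev it was using. */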
Uwe Kleine-König | 3d1285b | 2009-09-30 22:28:34 +0000 | [diff] [blame] | 1117 | static void __devexit virtnet_remove(struct virtio_device *vdev) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1118 | { |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 1119 | struct virtnet_info *vi = vdev->priv; |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1120 | |
Rusty Russell | 6e5aa7e | 2008-02-04 23:50:03 -0500 | [diff] [blame] | 1121 | /* Stop all the virtqueues. */ |
| 1122 | vdev->config->reset(vdev); |
| 1123 |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 1125 | unregister_netdev(vi->dev); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1126 | cancel_delayed_work_sync(&vi->refill); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 1127 | |
| 1128 | /* Free unused buffers in both send and recv, if any. */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1129 | free_unused_bufs(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1130 | |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1131 | vdev->config->del_vqs(vi->vdev); |
| 1132 | |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1133 | while (vi->pages) |
| 1134 | __free_pages(get_a_page(vi, GFP_KERNEL), 0); |
| 1135 | |
Krishna Kumar | 2e66f55 | 2011-07-20 03:56:02 +0000 | [diff] [blame] | 1136 | free_percpu(vi->stats); |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 1137 | free_netdev(vi->dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1138 | } |
| 1139 | |
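/* Match any virtio device that identifies itself as a network device. */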
| 1140 | static struct virtio_device_id id_table[] = { |
| 1141 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 1142 | { 0 }, |
| 1143 | }; |
| 1144 | |
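/* Feature bits this driver understands, advertised to the virtio core
 * through .feature_table below. */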
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1145 | static unsigned int features[] = { |
Mark McLoughlin | 5e4fe5c | 2008-07-08 17:10:42 +1000 | [diff] [blame] | 1146 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
| 1147 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1148 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1149 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
Sridhar Samudrala | 5c51675 | 2009-07-14 14:21:02 +0000 | [diff] [blame] | 1150 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 1151 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1152 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1153 | }; |
| 1154 | |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1155 | static struct virtio_driver virtio_net_driver = { |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1156 | .feature_table = features, |
| 1157 | .feature_table_size = ARRAY_SIZE(features), |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1158 | .driver.name = KBUILD_MODNAME, |
| 1159 | .driver.owner = THIS_MODULE, |
| 1160 | .id_table = id_table, |
| 1161 | .probe = virtnet_probe, |
| 1162 | .remove = __devexit_p(virtnet_remove), |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1163 | .config_changed = virtnet_config_changed, |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1164 | }; |
| 1165 | |
| 1166 | static int __init init(void) |
| 1167 | { |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1168 | return register_virtio_driver(&virtio_net_driver); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1169 | } |
| 1170 | |
| 1171 | static void __exit fini(void) |
| 1172 | { |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1173 | unregister_virtio_driver(&virtio_net_driver); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1174 | } |
| 1175 | module_init(init); |
| 1176 | module_exit(fini); |
| 1177 | |
| 1178 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 1179 | MODULE_DESCRIPTION("Virtio network driver"); |
| 1180 | MODULE_LICENSE("GPL"); |