/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

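/*
 * Note on the syncp members above: u64_stats_sync lets the 64-bit
 * counters be read consistently on 32-bit SMP, where a reader could
 * otherwise observe a torn value; readers loop on
 * u64_stats_fetch_begin/retry while a writer is mid-update (see
 * virtnet_stats() below).  On 64-bit builds the begin/end pairs
 * compile away to nothing.
 */
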
struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page as this
	 * header sg.  This padding makes the next sg 16 byte aligned
	 * after virtio_net_hdr.
	 */
	char padding[6];
};

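/*
 * The arithmetic behind padded_vnet_hdr: struct virtio_net_hdr is 10
 * bytes (two u8 fields plus four u16s), so the 6 bytes of padding round
 * the header up to 16, leaving the data portion of the page 16-byte
 * aligned.
 */
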
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

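/*
 * For illustration: give_pages() and get_a_page() above treat
 * page->private as the "next" pointer of a singly linked free list.
 * After give_pages(vi, A) with A->B->C already chained and D previously
 * cached:
 *
 *	vi->pages -> A -> B -> C -> D -> NULL
 *
 * get_a_page() then pops A first, so the most recently freed pages are
 * reused while they are still likely cache-warm.
 */
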
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		if (net_ratelimit())
			pr_debug("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

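/*
 * A sketch of what page_to_skb() above produces: the skb is allocated
 * with GOOD_COPY_LEN (128) bytes of room, and as much of the packet as
 * fits in its tailroom is copied linearly, so short packets end up
 * fully linear and their pages are recycled immediately.  Whatever is
 * left stays in place and is attached as page fragments; e.g. a
 * 1500-byte packet gets a ~128-byte linear part plus one fragment
 * covering the remaining ~1372 bytes.
 */
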
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

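/*
 * Worked example for receive_mergeable() above, assuming 4 KiB pages:
 * a 9000-byte frame plus the 12-byte mrg_rxbuf header needs three
 * page-sized buffers, so the host sets num_buffers to 3.  receive_buf()
 * has already consumed the first buffer; the while (--num_buf) loop
 * then pulls the remaining two pages off the virtqueue and attaches
 * each as a fragment.
 */
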
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separate vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

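/*
 * Summary of the three receive buffer layouts posted by the
 * add_recvbuf_* helpers above (sg entries per virtqueue_add_buf call):
 *
 *	small:     [virtio_net_hdr][skb data, MAX_PACKET_LEN]  -> 2 sgs
 *	big:       [hdr][rest of first page][MAX_SKB_FRAGS pages]
 *	           -> MAX_SKB_FRAGS + 2 sgs
 *	mergeable: [one whole page, header inline at the front] -> 1 sg
 */
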
/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we
	 * napi_enabled, we won't get another interrupt, so process any
	 * outstanding packets now.  virtnet_poll wants to re-enable the
	 * queue, so we disable it here.  We synchronize against
	 * interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		local_bh_disable();
		__napi_schedule(&vi->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
				 0, skb, GFP_ATOMIC);
}

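/*
 * Transmit sg layout built by xmit_skb() above: tx_sg[0] holds the
 * virtio header, tx_sg[1] the skb's linear data, and one further entry
 * per page fragment, so num_sg = 1 + 1 + nr_frags.  All of these are
 * "out" entries; on success virtqueue_add_buf() returns the remaining
 * descriptor capacity, which start_xmit() uses below.
 */
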
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (likely(capacity == -ENOMEM)) {
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
		} else {
			dev->stats.tx_fifo_errors++;
			if (net_ratelimit())
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

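/*
 * Why 2 + MAX_SKB_FRAGS in start_xmit() above: the worst-case skb needs
 * one descriptor for the virtio header, one for the linear data and one
 * per fragment, i.e. 2 + MAX_SKB_FRAGS in total.  Stopping the queue
 * while at least that many descriptors remain guarantees the next
 * transmit cannot fail for lack of ring space.
 */
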
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure we have some buffers: if oom use wq. */
	if (!try_fill_recv(vi, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	virtnet_napi_enable(vi);
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

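/*
 * Control queue layout built by virtnet_send_command() above, e.g. for
 * VIRTIO_NET_CTRL_RX_PROMISC with one data sg (out = 1, in = 0 from the
 * caller):
 *
 *	sg[0]: ctrl header {class, cmd}		(out)
 *	sg[1]: u8 promisc flag			(out)
 *	sg[2]: virtio_net_ctrl_ack status	(in, written by host)
 *
 * i.e. the header is prepended to the caller's "out" list and the
 * status ack is appended as the final "in" entry.
 */
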
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);
	napi_disable(&vi->napi);

	return 0;
}

Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 820 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 821 | { |
| 822 | struct virtnet_info *vi = netdev_priv(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 823 | struct scatterlist sg[2]; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 824 | u8 promisc, allmulti; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 825 | struct virtio_net_ctrl_mac *mac_data; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 826 | struct netdev_hw_addr *ha; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 827 | int uc_count; |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 828 | int mc_count; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 829 | void *buf; |
| 830 | int i; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 831 | |
| 832 | /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ |
| 833 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 834 | return; |
| 835 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 836 | promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 837 | allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 838 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 839 | sg_init_one(sg, &promisc, sizeof(promisc)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 840 | |
| 841 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 842 | VIRTIO_NET_CTRL_RX_PROMISC, |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 843 | sg, 1, 0)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 844 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
| 845 | promisc ? "en" : "dis"); |
| 846 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 847 | sg_init_one(sg, &allmulti, sizeof(allmulti)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 848 | |
| 849 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 850 | VIRTIO_NET_CTRL_RX_ALLMULTI, |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 851 | sg, 1, 0)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 852 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
| 853 | allmulti ? "en" : "dis"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 854 | |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 855 | uc_count = netdev_uc_count(dev); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 856 | mc_count = netdev_mc_count(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 857 | /* MAC filter - use one buffer for both lists */ |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 858 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 859 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 860 | mac_data = buf; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 861 | if (!buf) { |
| 862 | dev_warn(&dev->dev, "No memory for MAC address buffer\n"); |
| 863 | return; |
| 864 | } |
| 865 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 866 | sg_init_table(sg, 2); |
| 867 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 868 | /* Store the unicast list and count in the front of the buffer */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 869 | mac_data->entries = uc_count; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 870 | i = 0; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 871 | netdev_for_each_uc_addr(ha, dev) |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 872 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 873 | |
| 874 | sg_set_buf(&sg[0], mac_data, |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 875 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 876 | |
| 877 | /* multicast list and count fill the end */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 878 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 879 | |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 880 | mac_data->entries = mc_count; |
Jiri Pirko | 567ec87 | 2010-02-23 23:17:07 +0000 | [diff] [blame] | 881 | i = 0; |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 882 | netdev_for_each_mc_addr(ha, dev) |
| 883 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 884 | |
| 885 | sg_set_buf(&sg[1], mac_data, |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 886 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 887 | |
| 888 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
| 889 | VIRTIO_NET_CTRL_MAC_TABLE_SET, |
| 890 | sg, 2, 0)) |
| 891 | 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
| 892 | |
| 893 | kfree(buf); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 894 | } |
| 895 | |
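/* VLAN filtering is offloaded to the host: adding or removing a VID is
 * just a control-queue command carrying the 16-bit VLAN ID as payload. */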
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 896 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 897 | { |
| 898 | struct virtnet_info *vi = netdev_priv(dev); |
| 899 | struct scatterlist sg; |
| 900 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 901 | sg_init_one(&sg, &vid, sizeof(vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 902 | |
| 903 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 904 | VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) |
| 905 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 906 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 907 | } |
| 908 | |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 909 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 910 | { |
| 911 | struct virtnet_info *vi = netdev_priv(dev); |
| 912 | struct scatterlist sg; |
| 913 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 914 | sg_init_one(&sg, &vid, sizeof(vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 915 | |
| 916 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 917 | VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) |
| 918 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 919 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 920 | } |
| 921 | |
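/* Ring sizes are fixed by the hypervisor when the virtqueues are created,
 * so report the vring sizes as both the maximum and the current setting;
 * the guest has no way to resize them. */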
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 922 | static void virtnet_get_ringparam(struct net_device *dev, |
| 923 | struct ethtool_ringparam *ring) |
| 924 | { |
| 925 | struct virtnet_info *vi = netdev_priv(dev); |
| 926 | |
| 927 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq); |
| 928 | ring->tx_max_pending = virtqueue_get_vring_size(vi->svq); |
| 929 | ring->rx_pending = ring->rx_max_pending; |
| 930 | ring->tx_pending = ring->tx_max_pending; |
| 932 | }
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 934 | |
| 935 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 936 | struct ethtool_drvinfo *info) |
| 937 | { |
| 938 | struct virtnet_info *vi = netdev_priv(dev); |
| 939 | struct virtio_device *vdev = vi->vdev; |
| 940 | |
| 941 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 942 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 943 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 945 | }
| 946 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 947 | static const struct ethtool_ops virtnet_ethtool_ops = { |
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 948 | .get_drvinfo = virtnet_get_drvinfo, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 949 | .get_link = ethtool_op_get_link, |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 950 | .get_ringparam = virtnet_get_ringparam, |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 951 | }; |
| 952 | |
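/* 68 is the minimum IPv4 MTU required by RFC 791; 65535 is the largest
 * possible IP datagram. */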
Mark McLoughlin | 39da581 | 2008-11-26 13:58:11 +0000 | [diff] [blame] | 953 | #define MIN_MTU 68 |
| 954 | #define MAX_MTU 65535 |
| 955 | |
| 956 | static int virtnet_change_mtu(struct net_device *dev, int new_mtu) |
| 957 | { |
| 958 | if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) |
| 959 | return -EINVAL; |
| 960 | dev->mtu = new_mtu; |
| 961 | return 0; |
| 962 | } |
| 963 | |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 964 | static const struct net_device_ops virtnet_netdev = { |
| 965 | .ndo_open = virtnet_open, |
| 966 | .ndo_stop = virtnet_close, |
| 967 | .ndo_start_xmit = start_xmit, |
| 968 | .ndo_validate_addr = eth_validate_addr, |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 969 | .ndo_set_mac_address = virtnet_set_mac_address, |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 970 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 971 | .ndo_change_mtu = virtnet_change_mtu, |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 972 | .ndo_get_stats64 = virtnet_stats, |
Alex Williamson | 1824a98 | 2009-05-01 17:31:10 +0000 | [diff] [blame] | 973 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 974 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 975 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 976 | .ndo_poll_controller = virtnet_netpoll, |
| 977 | #endif |
| 978 | }; |
| 979 | |
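/* Config-change interrupts are serviced from this work item rather than
 * directly from the virtio callback: we need to take a mutex and poke
 * netdev state, neither of which is safe in interrupt context. */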
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 980 | static void virtnet_config_changed_work(struct work_struct *work) |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 981 | { |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 982 | struct virtnet_info *vi = |
| 983 | container_of(work, struct virtnet_info, config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 984 | u16 v; |
| 985 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 986 | mutex_lock(&vi->config_lock); |
| 987 | if (!vi->config_enable) |
| 988 | goto done; |
| 989 | |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 990 | if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 991 | offsetof(struct virtio_net_config, status), |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 992 | &v) < 0) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 993 | goto done; |
| 994 | |
| 995 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 996 | netdev_notify_peers(vi->dev); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 997 | virtnet_ack_link_announce(vi); |
| 998 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 999 | |
| 1000 | /* Ignore unknown (future) status bits */ |
| 1001 | v &= VIRTIO_NET_S_LINK_UP; |
| 1002 | |
| 1003 | if (vi->status == v) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1004 | goto done; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1005 | |
| 1006 | vi->status = v; |
| 1007 | |
| 1008 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
| 1009 | netif_carrier_on(vi->dev); |
| 1010 | netif_wake_queue(vi->dev); |
| 1011 | } else { |
| 1012 | netif_carrier_off(vi->dev); |
| 1013 | netif_stop_queue(vi->dev); |
| 1014 | } |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1015 | done: |
| 1016 | mutex_unlock(&vi->config_lock); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1017 | } |
| 1018 | |
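/* Called by the virtio core when the device's config space changes;
 * defer the real work to process context. */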
| 1019 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 1020 | { |
| 1021 | struct virtnet_info *vi = vdev->priv; |
| 1022 | |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1023 | schedule_work(&vi->config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1024 | } |
| 1025 | |
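/* Note the control virtqueue gets no callback: virtnet_send_command()
 * busy-waits for command completion, so the NULL entry in callbacks[]
 * below is deliberate. */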
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1026 | static int init_vqs(struct virtnet_info *vi) |
| 1027 | { |
| 1028 | struct virtqueue *vqs[3]; |
| 1029 | 	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
| 1030 | const char *names[] = { "input", "output", "control" }; |
| 1031 | int nvqs, err; |
| 1032 | |
| 1033 | /* We expect two virtqueues, receive then send, |
| 1034 | * and optionally control. */ |
| 1035 | nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; |
| 1036 | |
| 1037 | err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names); |
| 1038 | if (err) |
| 1039 | return err; |
| 1040 | |
| 1041 | vi->rvq = vqs[0]; |
| 1042 | vi->svq = vqs[1]; |
| 1043 | |
| 1044 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { |
| 1045 | vi->cvq = vqs[2]; |
| 1046 | |
| 1047 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
| 1048 | vi->dev->features |= NETIF_F_HW_VLAN_FILTER; |
| 1049 | } |
| 1050 | return 0; |
| 1051 | } |
| 1052 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1053 | static int virtnet_probe(struct virtio_device *vdev) |
| 1054 | { |
| 1055 | int err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1056 | struct net_device *dev; |
| 1057 | struct virtnet_info *vi; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1058 | |
| 1059 | /* Allocate ourselves a network device with room for our info */ |
| 1060 | dev = alloc_etherdev(sizeof(struct virtnet_info)); |
| 1061 | if (!dev) |
| 1062 | return -ENOMEM; |
| 1063 | |
| 1064 | /* Set up network device as normal. */ |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1065 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1066 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1067 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1068 | |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1069 | SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1070 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 1071 | |
| 1072 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1073 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1074 | /* This opens up the world of extra features. */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1075 | dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 1076 | if (csum) |
| 1077 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 1078 | |
| 1079 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
| 1080 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1081 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 1082 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1083 | 		/* Individual feature bits: what can the host handle? */
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1084 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 1085 | dev->hw_features |= NETIF_F_TSO; |
| 1086 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 1087 | dev->hw_features |= NETIF_F_TSO6; |
| 1088 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 1089 | dev->hw_features |= NETIF_F_TSO_ECN; |
| 1090 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) |
| 1091 | dev->hw_features |= NETIF_F_UFO; |
| 1092 | |
| 1093 | if (gso) |
| 1094 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
| 1095 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1096 | } |
| 1097 | |
| 1098 | /* Configuration may specify what MAC to use. Otherwise random. */ |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1099 | if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, |
Rusty Russell | a586d4f | 2008-02-04 23:49:56 -0500 | [diff] [blame] | 1100 | offsetof(struct virtio_net_config, mac), |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1101 | dev->dev_addr, dev->addr_len) < 0) |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 1102 | eth_hw_addr_random(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1103 | |
| 1104 | /* Set up our device-specific information */ |
| 1105 | vi = netdev_priv(dev); |
Dor Laor | 6c0cd7c | 2007-12-16 15:19:43 +0200 | [diff] [blame] | 1106 | netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1107 | vi->dev = dev; |
| 1108 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 1109 | vdev->priv = vi; |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1110 | vi->pages = NULL; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1111 | vi->stats = alloc_percpu(struct virtnet_stats); |
| 1112 | err = -ENOMEM; |
| 1113 | if (vi->stats == NULL) |
| 1114 | goto free; |
| 1115 | |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1116 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1117 | mutex_init(&vi->config_lock); |
| 1118 | vi->config_enable = true; |
| 1119 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
Michael S. Tsirkin | 5e01d2f | 2010-04-07 21:01:41 -0700 | [diff] [blame] | 1120 | sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); |
| 1121 | sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1122 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1123 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 1124 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 1125 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 1126 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1127 | vi->big_packets = true; |
| 1128 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1129 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 1130 | vi->mergeable_rx_bufs = true; |
| 1131 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1132 | err = init_vqs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1133 | if (err) |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1134 | goto free_stats; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1135 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1136 | err = register_netdev(dev); |
| 1137 | if (err) { |
| 1138 | pr_debug("virtio_net: registering device failed\n"); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1139 | goto free_vqs; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1140 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1141 | |
| 1142 | /* Last of all, set up some receive buffers. */ |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1143 | try_fill_recv(vi, GFP_KERNEL); |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1144 | |
| 1145 | /* If we didn't even get one input buffer, we're useless. */ |
| 1146 | if (vi->num == 0) { |
| 1147 | err = -ENOMEM; |
| 1148 | goto unregister; |
| 1149 | } |
| 1150 | |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 1151 | 	/* Assume link up if device can't report link status,
| 1152 | 	 * otherwise get link status from config. */
| 1153 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
| 1154 | netif_carrier_off(dev); |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1155 | schedule_work(&vi->config_work); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 1156 | } else { |
| 1157 | vi->status = VIRTIO_NET_S_LINK_UP; |
| 1158 | netif_carrier_on(dev); |
| 1159 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1160 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1161 | pr_debug("virtnet: registered device %s\n", dev->name); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1162 | return 0; |
| 1163 | |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1164 | unregister: |
| 1165 | unregister_netdev(dev); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1166 | free_vqs: |
| 1167 | vdev->config->del_vqs(vdev); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1168 | free_stats: |
| 1169 | free_percpu(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1170 | free: |
| 1171 | free_netdev(dev); |
| 1172 | return err; |
| 1173 | } |
| 1174 | |
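/* The device must already have been reset (see remove_vq_common()) so it
 * can no longer use the buffers; detach whatever it still holds and free
 * it. */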
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1175 | static void free_unused_bufs(struct virtnet_info *vi) |
| 1176 | { |
| 1177 | void *buf; |
| 1178 | while (1) { |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 1179 | buf = virtqueue_detach_unused_buf(vi->svq); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 1180 | if (!buf) |
| 1181 | break; |
| 1182 | dev_kfree_skb(buf); |
| 1183 | } |
| 1184 | while (1) { |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 1185 | buf = virtqueue_detach_unused_buf(vi->rvq); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1186 | if (!buf) |
| 1187 | break; |
| 1188 | if (vi->mergeable_rx_bufs || vi->big_packets) |
| 1189 | give_pages(vi, buf); |
| 1190 | else |
| 1191 | dev_kfree_skb(buf); |
| 1192 | --vi->num; |
| 1193 | } |
| 1194 | BUG_ON(vi->num != 0); |
| 1195 | } |
| 1196 | |
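/* Common teardown for remove and freeze: reset the device so it stops
 * touching memory, reclaim outstanding buffers, then delete the vqs. */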
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1197 | static void remove_vq_common(struct virtnet_info *vi) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1198 | { |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1199 | vi->vdev->config->reset(vi->vdev); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 1200 | |
| 1201 | /* Free unused buffers in both send and recv, if any. */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1202 | free_unused_bufs(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1203 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1204 | vi->vdev->config->del_vqs(vi->vdev); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1205 | |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1206 | while (vi->pages) |
| 1207 | __free_pages(get_a_page(vi, GFP_KERNEL), 0); |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1208 | } |
| 1209 | |
| 1210 | static void __devexit virtnet_remove(struct virtio_device *vdev) |
| 1211 | { |
| 1212 | struct virtnet_info *vi = vdev->priv; |
| 1213 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1214 | /* Prevent config work handler from accessing the device. */ |
| 1215 | mutex_lock(&vi->config_lock); |
| 1216 | vi->config_enable = false; |
| 1217 | mutex_unlock(&vi->config_lock); |
| 1218 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1219 | unregister_netdev(vi->dev); |
| 1220 | |
| 1221 | remove_vq_common(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1222 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1223 | flush_work(&vi->config_work); |
| 1224 | |
Krishna Kumar | 2e66f55 | 2011-07-20 03:56:02 +0000 | [diff] [blame] | 1225 | free_percpu(vi->stats); |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 1226 | free_netdev(vi->dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1227 | } |
| 1228 | |
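/* Across suspend/resume the device is reset, so freeze tears the
 * virtqueues down completely and restore rebuilds them from scratch,
 * mirroring the remove/probe paths. */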
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1229 | #ifdef CONFIG_PM |
| 1230 | static int virtnet_freeze(struct virtio_device *vdev) |
| 1231 | { |
| 1232 | struct virtnet_info *vi = vdev->priv; |
| 1233 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1234 | /* Prevent config work handler from accessing the device */ |
| 1235 | mutex_lock(&vi->config_lock); |
| 1236 | vi->config_enable = false; |
| 1237 | mutex_unlock(&vi->config_lock); |
| 1238 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1239 | netif_device_detach(vi->dev); |
| 1240 | cancel_delayed_work_sync(&vi->refill); |
| 1241 | |
| 1242 | if (netif_running(vi->dev)) |
| 1243 | napi_disable(&vi->napi); |
| 1244 | |
| 1245 | remove_vq_common(vi); |
| 1246 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1247 | flush_work(&vi->config_work); |
| 1248 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1249 | return 0; |
| 1250 | } |
| 1251 | |
| 1252 | static int virtnet_restore(struct virtio_device *vdev) |
| 1253 | { |
| 1254 | struct virtnet_info *vi = vdev->priv; |
| 1255 | int err; |
| 1256 | |
| 1257 | err = init_vqs(vi); |
| 1258 | if (err) |
| 1259 | return err; |
| 1260 | |
| 1261 | if (netif_running(vi->dev)) |
| 1262 | virtnet_napi_enable(vi); |
| 1263 | |
| 1264 | netif_device_attach(vi->dev); |
| 1265 | |
| 1266 | if (!try_fill_recv(vi, GFP_KERNEL)) |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1267 | schedule_delayed_work(&vi->refill, 0); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1268 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1269 | mutex_lock(&vi->config_lock); |
| 1270 | vi->config_enable = true; |
| 1271 | mutex_unlock(&vi->config_lock); |
| 1272 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1273 | return 0; |
| 1274 | } |
| 1275 | #endif |
| 1276 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1277 | static struct virtio_device_id id_table[] = { |
| 1278 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 1279 | { 0 }, |
| 1280 | }; |
| 1281 | |
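/* Feature bits the driver understands; the virtio core intersects this
 * list with what the host offers during probe. */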
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1282 | static unsigned int features[] = { |
Mark McLoughlin | 5e4fe5c | 2008-07-08 17:10:42 +1000 | [diff] [blame] | 1283 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
| 1284 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1285 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1286 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
Sridhar Samudrala | 5c51675 | 2009-07-14 14:21:02 +0000 | [diff] [blame] | 1287 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 1288 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1289 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1290 | VIRTIO_NET_F_GUEST_ANNOUNCE, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1291 | }; |
| 1292 | |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1293 | static struct virtio_driver virtio_net_driver = { |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1294 | .feature_table = features, |
| 1295 | .feature_table_size = ARRAY_SIZE(features), |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1296 | .driver.name = KBUILD_MODNAME, |
| 1297 | .driver.owner = THIS_MODULE, |
| 1298 | .id_table = id_table, |
| 1299 | .probe = virtnet_probe, |
| 1300 | .remove = __devexit_p(virtnet_remove), |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1301 | .config_changed = virtnet_config_changed, |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1302 | #ifdef CONFIG_PM |
| 1303 | .freeze = virtnet_freeze, |
| 1304 | .restore = virtnet_restore, |
| 1305 | #endif |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1306 | }; |
| 1307 | |
| 1308 | static int __init init(void) |
| 1309 | { |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1310 | return register_virtio_driver(&virtio_net_driver); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1311 | } |
| 1312 | |
| 1313 | static void __exit fini(void) |
| 1314 | { |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1315 | unregister_virtio_driver(&virtio_net_driver); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1316 | } |
| 1317 | module_init(init); |
| 1318 | module_exit(fini); |
| 1319 | |
| 1320 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 1321 | MODULE_DESCRIPTION("Virtio network driver"); |
| 1322 | MODULE_LICENSE("GPL"); |