/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * The virtio_net_hdr must be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares a page with this header sg.
	 * This padding makes the next sg 16-byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (virtqueue_get_queue_index(vq) - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return virtqueue_get_queue_index(vq) / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

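/* The per-skb virtio-net header lives in the skb control buffer (skb->cb),
 * so no separate allocation is needed for it.
 */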
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

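/* Attach up to a page's worth of data at @offset in @page to the skb as a
 * new fragment, reducing *len by the amount consumed.
 */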
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into an skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This usually
	 * indicates a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

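/* A mergeable-buffer packet can span several receive buffers; pull the
 * remaining num_buffers - 1 pages off the virtqueue and attach them as
 * skb fragments.
 */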
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(rq->vq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--rq->num;
	}
	return 0;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(rq, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(rq, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

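/* Post a single skb-backed receive buffer: the virtio header goes in sg[0]
 * and the MAX_PACKET_LEN linear data area in the following entries.
 */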
static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

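/* Post a chain of pages big enough for a maximally-sized GSO packet:
 * MAX_SKB_FRAGS + 2 sg entries, with the pages linked via page->private.
 */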
static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the virtio_net_hdr only, due to QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

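/* Post one whole page per buffer; the host merges as many of them as a
 * packet needs and reports the count in the header's num_buffers field.
 */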
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(rq, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(rq, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		/* Make sure we have some buffers: if oom use wq. */
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

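/* Reclaim skbs the host has finished transmitting, updating tx stats. */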
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

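/* Fill in the virtio-net header from the skb's checksum/GSO metadata and
 * post header + data to the tx virtqueue.
 */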
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);

	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
				 0, skb, GFP_ATOMIC);
}

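/* ndo_start_xmit: reclaim completed skbs, post this one to the matching tx
 * virtqueue, and stop the subqueue when the ring can no longer hold a
 * maximally-fragmented skb.
 */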
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(sq->vq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

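/* Fold the per-cpu tx/rx counters into 64-bit totals, using the u64 stats
 * seqcounts so readers see consistent values on 32-bit machines.
 */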
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
| 827 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
| 828 | struct scatterlist *data, int out, int in) |
| 829 | { |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 830 | struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 831 | struct virtio_net_ctrl_hdr ctrl; |
| 832 | virtio_net_ctrl_ack status = ~0; |
| 833 | unsigned int tmp; |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 834 | int i; |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 835 | |
Alexander Beregalov | 0ee904c | 2009-04-11 14:50:23 +0000 | [diff] [blame] | 836 | /* Caller should know better */ |
| 837 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) || |
| 838 | (out + in > VIRTNET_SEND_COMMAND_SG_MAX)); |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 839 | |
| 840 | out++; /* Add header */ |
| 841 | in++; /* Add return status */ |
| 842 | |
| 843 | ctrl.class = class; |
| 844 | ctrl.cmd = cmd; |
| 845 | |
| 846 | sg_init_table(sg, out + in); |
| 847 | |
| 848 | sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 849 | for_each_sg(data, s, out + in - 2, i) |
| 850 | sg_set_buf(&sg[i + 1], sg_virt(s), s->length); |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 851 | sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); |
| 852 | |
Rusty Russell | f96fde4 | 2012-01-12 15:44:42 +1030 | [diff] [blame] | 853 | BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0); |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 854 | |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 855 | virtqueue_kick(vi->cvq); |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 856 | |
| 857 | /* |
| 858 | * Spin for a response; the kick causes an ioport write that traps |
| 859 | * into the hypervisor, so the request should be handled immediately. |
| 860 | */ |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 861 | while (!virtqueue_get_buf(vi->cvq, &tmp)) |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 862 | cpu_relax(); |
| 863 | |
| 864 | return status == VIRTIO_NET_OK; |
| 865 | } |
| 866 | |
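| | /* Acknowledge a link announcement so the host can clear the |
| | * VIRTIO_NET_S_ANNOUNCE bit; issued under the RTNL lock. |
| | */ |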
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 867 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 868 | { |
| 869 | rtnl_lock(); |
| 870 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
| 871 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, |
| 872 | 0, 0)) |
| 873 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
| 874 | rtnl_unlock(); |
| 875 | } |
| 876 | |
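| | /* Ask the device to switch to queue_pairs tx/rx pairs via the |
| | * VIRTIO_NET_CTRL_MQ command. Quietly succeeds when the device lacks |
| | * multiqueue support, since the single default pair is always usable. |
| | */ |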
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 877 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 878 | { |
| 879 | struct scatterlist sg; |
| 880 | struct virtio_net_ctrl_mq s; |
| 881 | struct net_device *dev = vi->dev; |
| 882 | |
| 883 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 884 | return 0; |
| 885 | |
| 886 | s.virtqueue_pairs = queue_pairs; |
| 887 | sg_init_one(&sg, &s, sizeof(s)); |
| 888 | |
| 889 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
| 890 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)) { |
| 891 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", |
| 892 | queue_pairs); |
| 893 | return -EINVAL; |
| 894 | } else { |
| 895 | vi->curr_queue_pairs = queue_pairs; |
| | } |
| 896 | |
| 897 | return 0; |
| 898 | } |
| 899 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 900 | static int virtnet_close(struct net_device *dev) |
| 901 | { |
| 902 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 903 | int i; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 904 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 905 | /* Make sure refill_work doesn't re-enable napi! */ |
| 906 | cancel_delayed_work_sync(&vi->refill); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 907 | |
| 908 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 909 | napi_disable(&vi->rq[i].napi); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 910 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 911 | return 0; |
| 912 | } |
| 913 | |
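| | /* Push the promiscuous/all-multicast flags and the unicast and |
| | * multicast address lists down to the host. The address lists are |
| | * walked with BH disabled, hence the GFP_ATOMIC allocation below. |
| | */ |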
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 914 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 915 | { |
| 916 | struct virtnet_info *vi = netdev_priv(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 917 | struct scatterlist sg[2]; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 918 | u8 promisc, allmulti; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 919 | struct virtio_net_ctrl_mac *mac_data; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 920 | struct netdev_hw_addr *ha; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 921 | int uc_count; |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 922 | int mc_count; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 923 | void *buf; |
| 924 | int i; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 925 | |
| 926 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
| 927 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 928 | return; |
| 929 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 930 | promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 931 | allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 932 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 933 | sg_init_one(sg, &promisc, sizeof(promisc)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 934 | |
| 935 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 936 | VIRTIO_NET_CTRL_RX_PROMISC, |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 937 | sg, 1, 0)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 938 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
| 939 | promisc ? "en" : "dis"); |
| 940 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 941 | sg_init_one(sg, &allmulti, sizeof(allmulti)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 942 | |
| 943 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 944 | VIRTIO_NET_CTRL_RX_ALLMULTI, |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 945 | sg, 1, 0)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 946 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
| 947 | allmulti ? "en" : "dis"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 948 | |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 949 | uc_count = netdev_uc_count(dev); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 950 | mc_count = netdev_mc_count(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 951 | /* MAC filter - use one buffer for both lists */ |
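| | /* Layout: [uc count][uc_count MACs][mc count][mc_count MACs] */ |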
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 952 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 953 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 954 | mac_data = buf; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 955 | if (!buf) { |
| 956 | dev_warn(&dev->dev, "No memory for MAC address buffer\n"); |
| 957 | return; |
| 958 | } |
| 959 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 960 | sg_init_table(sg, 2); |
| 961 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 962 | /* Store the unicast list and count in the front of the buffer */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 963 | mac_data->entries = uc_count; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 964 | i = 0; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 965 | netdev_for_each_uc_addr(ha, dev) |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 966 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 967 | |
| 968 | sg_set_buf(&sg[0], mac_data, |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 969 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 970 | |
| 971 | /* multicast list and count fill the end */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 972 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 973 | |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 974 | mac_data->entries = mc_count; |
Jiri Pirko | 567ec87 | 2010-02-23 23:17:07 +0000 | [diff] [blame] | 975 | i = 0; |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 976 | netdev_for_each_mc_addr(ha, dev) |
| 977 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 978 | |
| 979 | sg_set_buf(&sg[1], mac_data, |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 980 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 981 | |
| 982 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
| 983 | VIRTIO_NET_CTRL_MAC_TABLE_SET, |
| 984 | sg, 2, 0)) |
| 985 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
| 986 | |
| 987 | kfree(buf); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 988 | } |
| 989 | |
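| | /* Ask the host to accept frames tagged with this VLAN ID. */ |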
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 990 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 991 | { |
| 992 | struct virtnet_info *vi = netdev_priv(dev); |
| 993 | struct scatterlist sg; |
| 994 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 995 | sg_init_one(&sg, &vid, sizeof(vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 996 | |
| 997 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 998 | VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) |
| 999 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1000 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1001 | } |
| 1002 | |
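| | /* Ask the host to drop frames tagged with this VLAN ID again. */ |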
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1003 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1004 | { |
| 1005 | struct virtnet_info *vi = netdev_priv(dev); |
| 1006 | struct scatterlist sg; |
| 1007 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1008 | sg_init_one(&sg, &vid, sizeof(vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1009 | |
| 1010 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 1011 | VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) |
| 1012 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1013 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1014 | } |
| 1015 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1016 | static void virtnet_set_affinity(struct virtnet_info *vi, bool set) |
| 1017 | { |
| 1018 | int i; |
| 1019 | |
| 1020 | /* In multiqueue mode, when the number of online CPUs equals the number |
| 1021 | * of queue pairs, make each queue pair private to one CPU by setting |
| 1022 | * its affinity hint, eliminating contention. When those conditions do |
| | * not hold, clear any hints set earlier instead of pinning. |
| 1023 | */ |
| 1024 | if ((vi->curr_queue_pairs == 1 || |
| 1025 | vi->max_queue_pairs != num_online_cpus()) && set) { |
| 1026 | if (vi->affinity_hint_set) |
| 1027 | set = false; |
| 1028 | else |
| 1029 | return; |
| 1030 | } |
| 1031 | |
| 1032 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1033 | int cpu = set ? i : -1; |
| 1034 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
| 1035 | virtqueue_set_affinity(vi->sq[i].vq, cpu); |
| 1036 | } |
| 1037 | |
| 1038 | if (set) |
| 1039 | vi->affinity_hint_set = true; |
| 1040 | else |
| 1041 | vi->affinity_hint_set = false; |
| 1042 | } |
| 1043 | |
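| | /* The vring sizes are fixed once the device is set up, so report the |
| | * current ring size as both current and maximum. |
| | */ |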
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1044 | static void virtnet_get_ringparam(struct net_device *dev, |
| 1045 | struct ethtool_ringparam *ring) |
| 1046 | { |
| 1047 | struct virtnet_info *vi = netdev_priv(dev); |
| 1048 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1049 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
| 1050 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1051 | ring->rx_pending = ring->rx_max_pending; |
| 1052 | ring->tx_pending = ring->tx_max_pending; |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1053 | } |
| 1054 | |
| 1056 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 1057 | struct ethtool_drvinfo *info) |
| 1058 | { |
| 1059 | struct virtnet_info *vi = netdev_priv(dev); |
| 1060 | struct virtio_device *vdev = vi->vdev; |
| 1061 | |
| 1062 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 1063 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 1064 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 1066 | } |
| 1067 | |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1068 | /* TODO: Eliminate OOO packets during switching */ |
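| | /* ethtool -L: only "combined" channels are supported, one per queue |
| | * pair; the request is forwarded to the device, and the stack's real |
| | * queue counts are updated on success. |
| | */ |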
| 1069 | static int virtnet_set_channels(struct net_device *dev, |
| 1070 | struct ethtool_channels *channels) |
| 1071 | { |
| 1072 | struct virtnet_info *vi = netdev_priv(dev); |
| 1073 | u16 queue_pairs = channels->combined_count; |
| 1074 | int err; |
| 1075 | |
| 1076 | /* We don't support separate rx/tx channels. |
| 1077 | * We don't allow setting 'other' channels. |
| 1078 | */ |
| 1079 | if (channels->rx_count || channels->tx_count || channels->other_count) |
| 1080 | return -EINVAL; |
| 1081 | |
| 1082 | if (queue_pairs > vi->max_queue_pairs) |
| 1083 | return -EINVAL; |
| 1084 | |
| 1085 | err = virtnet_set_queues(vi, queue_pairs); |
| 1086 | if (!err) { |
| 1087 | netif_set_real_num_tx_queues(dev, queue_pairs); |
| 1088 | netif_set_real_num_rx_queues(dev, queue_pairs); |
| 1089 | |
| 1090 | virtnet_set_affinity(vi, true); |
| 1091 | } |
| 1092 | |
| 1093 | return err; |
| 1094 | } |
| 1095 | |
| 1096 | static void virtnet_get_channels(struct net_device *dev, |
| 1097 | struct ethtool_channels *channels) |
| 1098 | { |
| 1099 | struct virtnet_info *vi = netdev_priv(dev); |
| 1100 | |
| 1101 | channels->combined_count = vi->curr_queue_pairs; |
| 1102 | channels->max_combined = vi->max_queue_pairs; |
| 1103 | channels->max_other = 0; |
| 1104 | channels->rx_count = 0; |
| 1105 | channels->tx_count = 0; |
| 1106 | channels->other_count = 0; |
| 1107 | } |
| 1108 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 1109 | static const struct ethtool_ops virtnet_ethtool_ops = { |
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 1110 | .get_drvinfo = virtnet_get_drvinfo, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1111 | .get_link = ethtool_op_get_link, |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1112 | .get_ringparam = virtnet_get_ringparam, |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1113 | .set_channels = virtnet_set_channels, |
| 1114 | .get_channels = virtnet_get_channels, |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1115 | }; |
| 1116 | |
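| | /* 68 is the minimum MTU IPv4 requires (RFC 791); 65535 is the largest |
| | * size the 16-bit IP total-length field can describe. |
| | */ |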
Mark McLoughlin | 39da581 | 2008-11-26 13:58:11 +0000 | [diff] [blame] | 1117 | #define MIN_MTU 68 |
| 1118 | #define MAX_MTU 65535 |
| 1119 | |
| 1120 | static int virtnet_change_mtu(struct net_device *dev, int new_mtu) |
| 1121 | { |
| 1122 | if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) |
| 1123 | return -EINVAL; |
| 1124 | dev->mtu = new_mtu; |
| 1125 | return 0; |
| 1126 | } |
| 1127 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1128 | /* To avoid contending for a lock held by a vCPU that may have exited |
| 1129 | * to the host, select the txq based on the processor id. The loop |
| | * below folds the id into the real tx queue range (a cheap modulo). |
| 1130 | * TODO: handle CPU hotplug. |
| 1131 | */ |
| 1132 | static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) |
| 1133 | { |
| 1134 | int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : |
| 1135 | smp_processor_id(); |
| 1136 | |
| 1137 | while (unlikely(txq >= dev->real_num_tx_queues)) |
| 1138 | txq -= dev->real_num_tx_queues; |
| 1139 | |
| 1140 | return txq; |
| 1141 | } |
| 1142 | |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1143 | static const struct net_device_ops virtnet_netdev = { |
| 1144 | .ndo_open = virtnet_open, |
| 1145 | .ndo_stop = virtnet_close, |
| 1146 | .ndo_start_xmit = start_xmit, |
| 1147 | .ndo_validate_addr = eth_validate_addr, |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1148 | .ndo_set_mac_address = virtnet_set_mac_address, |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1149 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1150 | .ndo_change_mtu = virtnet_change_mtu, |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1151 | .ndo_get_stats64 = virtnet_stats, |
Alex Williamson | 1824a98 | 2009-05-01 17:31:10 +0000 | [diff] [blame] | 1152 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 1153 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1154 | .ndo_select_queue = virtnet_select_queue, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1155 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1156 | .ndo_poll_controller = virtnet_netpoll, |
| 1157 | #endif |
| 1158 | }; |
| 1159 | |
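| | /* Deferred handler for config-space interrupts: re-read the status |
| | * field, ack any link announcement, and propagate carrier and queue |
| | * state to the stack. |
| | */ |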
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1160 | static void virtnet_config_changed_work(struct work_struct *work) |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1161 | { |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1162 | struct virtnet_info *vi = |
| 1163 | container_of(work, struct virtnet_info, config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1164 | u16 v; |
| 1165 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1166 | mutex_lock(&vi->config_lock); |
| 1167 | if (!vi->config_enable) |
| 1168 | goto done; |
| 1169 | |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1170 | if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1171 | offsetof(struct virtio_net_config, status), |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1172 | &v) < 0) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1173 | goto done; |
| 1174 | |
| 1175 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 1176 | netdev_notify_peers(vi->dev); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1177 | virtnet_ack_link_announce(vi); |
| 1178 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1179 | |
| 1180 | /* Ignore unknown (future) status bits */ |
| 1181 | v &= VIRTIO_NET_S_LINK_UP; |
| 1182 | |
| 1183 | if (vi->status == v) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1184 | goto done; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1185 | |
| 1186 | vi->status = v; |
| 1187 | |
| 1188 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
| 1189 | netif_carrier_on(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1190 | netif_tx_wake_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1191 | } else { |
| 1192 | netif_carrier_off(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1193 | netif_tx_stop_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1194 | } |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1195 | done: |
| 1196 | mutex_unlock(&vi->config_lock); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1197 | } |
| 1198 | |
| 1199 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 1200 | { |
| 1201 | struct virtnet_info *vi = vdev->priv; |
| 1202 | |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1203 | schedule_work(&vi->config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1204 | } |
| 1205 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1206 | static void virtnet_free_queues(struct virtnet_info *vi) |
| 1207 | { |
| 1208 | kfree(vi->rq); |
| 1209 | kfree(vi->sq); |
| 1210 | } |
| 1211 | |
| 1212 | static void free_receive_bufs(struct virtnet_info *vi) |
| 1213 | { |
| 1214 | int i; |
| 1215 | |
| 1216 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1217 | while (vi->rq[i].pages) |
| 1218 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
| 1219 | } |
| 1220 | } |
| 1221 | |
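| | /* Reclaim buffers still owned by the device: transmit skbs are freed, |
| | * while receive buffers are freed or returned to the page list |
| | * depending on the receive mode. |
| | */ |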
| 1222 | static void free_unused_bufs(struct virtnet_info *vi) |
| 1223 | { |
| 1224 | void *buf; |
| 1225 | int i; |
| 1226 | |
| 1227 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1228 | struct virtqueue *vq = vi->sq[i].vq; |
| 1229 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
| 1230 | dev_kfree_skb(buf); |
| 1231 | } |
| 1232 | |
| 1233 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1234 | struct virtqueue *vq = vi->rq[i].vq; |
| 1235 | |
| 1236 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
| 1237 | if (vi->mergeable_rx_bufs || vi->big_packets) |
| 1238 | give_pages(&vi->rq[i], buf); |
| 1239 | else |
| 1240 | dev_kfree_skb(buf); |
| 1241 | --vi->rq[i].num; |
| 1242 | } |
| 1243 | BUG_ON(vi->rq[i].num != 0); |
| 1244 | } |
| 1245 | } |
| 1246 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1247 | static void virtnet_del_vqs(struct virtnet_info *vi) |
| 1248 | { |
| 1249 | struct virtio_device *vdev = vi->vdev; |
| 1250 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1251 | virtnet_set_affinity(vi, false); |
| 1252 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1253 | vdev->config->del_vqs(vdev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1254 | |
| 1255 | virtnet_free_queues(vi); |
| 1256 | } |
| 1257 | |
| 1258 | static int virtnet_find_vqs(struct virtnet_info *vi) |
| 1259 | { |
| 1260 | vq_callback_t **callbacks; |
| 1261 | struct virtqueue **vqs; |
| 1262 | int ret = -ENOMEM; |
| 1263 | int i, total_vqs; |
| 1264 | const char **names; |
| 1265 | |
| 1266 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, then the |
| 1267 | * N-1 additional RX/TX queue pairs used in multiqueue mode (if any), |
| 1268 | * and finally an optional control vq. |
| 1269 | */ |
| 1270 | total_vqs = vi->max_queue_pairs * 2 + |
| 1271 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
| 1272 | |
| 1273 | /* Allocate space for find_vqs parameters */ |
| 1274 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); |
| 1275 | if (!vqs) |
| 1276 | goto err_vq; |
| 1277 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); |
| 1278 | if (!callbacks) |
| 1279 | goto err_callback; |
| 1280 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); |
| 1281 | if (!names) |
| 1282 | goto err_names; |
| 1283 | |
| 1284 | /* Parameters for control virtqueue, if any */ |
| 1285 | if (vi->has_cvq) { |
| 1286 | callbacks[total_vqs - 1] = NULL; |
| 1287 | names[total_vqs - 1] = "control"; |
| 1288 | } |
| 1289 | |
| 1290 | /* Allocate/initialize parameters for send/receive virtqueues */ |
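| | /* rxq2vq()/txq2vq() translate a queue-pair index into a vring index; |
| | * given the layout described above, pair i presumably maps to vrings |
| | * 2*i (RX) and 2*i + 1 (TX). |
| | */ |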
| 1291 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1292 | callbacks[rxq2vq(i)] = skb_recv_done; |
| 1293 | callbacks[txq2vq(i)] = skb_xmit_done; |
| 1294 | sprintf(vi->rq[i].name, "input.%d", i); |
| 1295 | sprintf(vi->sq[i].name, "output.%d", i); |
| 1296 | names[rxq2vq(i)] = vi->rq[i].name; |
| 1297 | names[txq2vq(i)] = vi->sq[i].name; |
| 1298 | } |
| 1299 | |
| 1300 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
| 1301 | names); |
| 1302 | if (ret) |
| 1303 | goto err_find; |
| 1304 | |
| 1305 | if (vi->has_cvq) { |
| 1306 | vi->cvq = vqs[total_vqs - 1]; |
| 1307 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
| 1308 | vi->dev->features |= NETIF_F_HW_VLAN_FILTER; |
| 1309 | } |
| 1310 | |
| 1311 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1312 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
| 1313 | vi->sq[i].vq = vqs[txq2vq(i)]; |
| 1314 | } |
| 1315 | |
| 1316 | kfree(names); |
| 1317 | kfree(callbacks); |
| 1318 | kfree(vqs); |
| 1319 | |
| 1320 | return 0; |
| 1321 | |
| 1322 | err_find: |
| 1323 | kfree(names); |
| 1324 | err_names: |
| 1325 | kfree(callbacks); |
| 1326 | err_callback: |
| 1327 | kfree(vqs); |
| 1328 | err_vq: |
| 1329 | return ret; |
| 1330 | } |
| 1331 | |
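| | /* Allocate the send/receive queue arrays and initialize NAPI and the |
| | * scatterlists for each queue pair. |
| | */ |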
| 1332 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
| 1333 | { |
| 1334 | int i; |
| 1335 | |
| 1336 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
| 1337 | if (!vi->sq) |
| 1338 | goto err_sq; |
| 1339 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); |
Amerigo Wang | 008d427 | 2012-12-10 02:24:08 +0000 | [diff] [blame] | 1340 | if (!vi->rq) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1341 | goto err_rq; |
| 1342 | |
| 1343 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
| 1344 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1345 | vi->rq[i].pages = NULL; |
| 1346 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
| 1347 | napi_weight); |
| 1348 | |
| 1349 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
| 1350 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
| 1351 | } |
| 1352 | |
| 1353 | return 0; |
| 1354 | |
| 1355 | err_rq: |
| 1356 | kfree(vi->sq); |
| 1357 | err_sq: |
| 1358 | return -ENOMEM; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1359 | } |
| 1360 | |
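| | /* Allocate the queue state, obtain the virtqueues from the transport, |
| | * and spread them across the online CPUs. |
| | */ |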
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1361 | static int init_vqs(struct virtnet_info *vi) |
| 1362 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1363 | int ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1364 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1365 | /* Allocate send & receive queues */ |
| 1366 | ret = virtnet_alloc_queues(vi); |
| 1367 | if (ret) |
| 1368 | goto err; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1369 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1370 | ret = virtnet_find_vqs(vi); |
| 1371 | if (ret) |
| 1372 | goto err_free; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1373 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1374 | virtnet_set_affinity(vi, true); |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1375 | return 0; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1376 | |
| 1377 | err_free: |
| 1378 | virtnet_free_queues(vi); |
| 1379 | err: |
| 1380 | return ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1381 | } |
| 1382 | |
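| | /* Device probe: size the queue pairs from the config space, build the |
| | * netdev, negotiate offload features, set up the virtqueues, and sync |
| | * the carrier state with the host. |
| | */ |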
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1383 | static int virtnet_probe(struct virtio_device *vdev) |
| 1384 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1385 | int i, err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1386 | struct net_device *dev; |
| 1387 | struct virtnet_info *vi; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1388 | u16 max_queue_pairs; |
| 1389 | |
| 1390 | /* Check whether the host supports a multiqueue virtio_net device */ |
| 1391 | err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, |
| 1392 | offsetof(struct virtio_net_config, |
| 1393 | max_virtqueue_pairs), &max_queue_pairs); |
| 1394 | |
| 1395 | /* We need a sane pair count and a control vq; else use one queue pair */ |
| 1396 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
| 1397 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
| 1398 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 1399 | max_queue_pairs = 1; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1400 | |
| 1401 | /* Allocate ourselves a network device with room for our info */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1402 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1403 | if (!dev) |
| 1404 | return -ENOMEM; |
| 1405 | |
| 1406 | /* Set up network device as normal. */ |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1407 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1408 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1409 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1410 | |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1411 | SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1412 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 1413 | |
| 1414 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1415 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1416 | /* This opens up the world of extra features. */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1417 | dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 1418 | if (csum) |
| 1419 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
| 1420 | |
| 1421 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
| 1422 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1423 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 1424 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1425 | /* Individual feature bits: what can host handle? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 1426 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 1427 | dev->hw_features |= NETIF_F_TSO; |
| 1428 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 1429 | dev->hw_features |= NETIF_F_TSO6; |
| 1430 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 1431 | dev->hw_features |= NETIF_F_TSO_ECN; |
| 1432 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) |
| 1433 | dev->hw_features |= NETIF_F_UFO; |
| 1434 | |
| 1435 | if (gso) |
| 1436 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
| 1437 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1438 | } |
| 1439 | |
| 1440 | /* Configuration may specify what MAC to use. Otherwise random. */ |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1441 | if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, |
Rusty Russell | a586d4f | 2008-02-04 23:49:56 -0500 | [diff] [blame] | 1442 | offsetof(struct virtio_net_config, mac), |
Sasha Levin | 77dd769 | 2011-08-14 17:52:33 +0300 | [diff] [blame] | 1443 | dev->dev_addr, dev->addr_len) < 0) |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 1444 | eth_hw_addr_random(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1445 | |
| 1446 | /* Set up our device-specific information */ |
| 1447 | vi = netdev_priv(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1448 | vi->dev = dev; |
| 1449 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 1450 | vdev->priv = vi; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1451 | vi->stats = alloc_percpu(struct virtnet_stats); |
| 1452 | err = -ENOMEM; |
| 1453 | if (vi->stats == NULL) |
| 1454 | goto free; |
| 1455 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1456 | mutex_init(&vi->config_lock); |
| 1457 | vi->config_enable = true; |
| 1458 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1459 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1460 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 1461 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 1462 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 1463 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1464 | vi->big_packets = true; |
| 1465 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1466 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 1467 | vi->mergeable_rx_bufs = true; |
| 1468 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1469 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 1470 | vi->has_cvq = true; |
| 1471 | |
| 1472 | /* Use single tx/rx queue pair as default */ |
| 1473 | vi->curr_queue_pairs = 1; |
| 1474 | vi->max_queue_pairs = max_queue_pairs; |
| 1475 | |
| 1476 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 1477 | err = init_vqs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1478 | if (err) |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1479 | goto free_stats; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1480 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1481 | netif_set_real_num_tx_queues(dev, 1); |
| 1482 | netif_set_real_num_rx_queues(dev, 1); |
| 1483 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1484 | err = register_netdev(dev); |
| 1485 | if (err) { |
| 1486 | pr_debug("virtio_net: registering device failed\n"); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1487 | goto free_vqs; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1488 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1489 | |
| 1490 | /* Last of all, set up some receive buffers. */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1491 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1492 | try_fill_recv(&vi->rq[i], GFP_KERNEL); |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1493 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1494 | /* If we didn't even get one input buffer, we're useless. */ |
| 1495 | if (vi->rq[i].num == 0) { |
| 1496 | free_unused_bufs(vi); |
| 1497 | err = -ENOMEM; |
| 1498 | goto free_recv_bufs; |
| 1499 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1500 | } |
| 1501 | |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 1502 | /* Assume the link is up if the device can't report link status; |
| 1503 | * otherwise get the link status from the config space. */ |
| 1504 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
| 1505 | netif_carrier_off(dev); |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1506 | schedule_work(&vi->config_work); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 1507 | } else { |
| 1508 | vi->status = VIRTIO_NET_S_LINK_UP; |
| 1509 | netif_carrier_on(dev); |
| 1510 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1511 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1512 | pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", |
| 1513 | dev->name, max_queue_pairs); |
| 1514 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1515 | return 0; |
| 1516 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1517 | free_recv_bufs: |
| 1518 | free_receive_bufs(vi); |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 1519 | unregister_netdev(dev); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1520 | free_vqs: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1521 | cancel_delayed_work_sync(&vi->refill); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1522 | virtnet_del_vqs(vi); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1523 | free_stats: |
| 1524 | free_percpu(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1525 | free: |
| 1526 | free_netdev(dev); |
| 1527 | return err; |
| 1528 | } |
| 1529 | |
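| | /* Reset the device so it stops touching the rings, then reclaim all |
| | * outstanding buffers and delete the virtqueues. |
| | */ |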
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1530 | static void remove_vq_common(struct virtnet_info *vi) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1531 | { |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1532 | vi->vdev->config->reset(vi->vdev); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 1533 | |
| 1534 | /* Free unused buffers in both send and recv, if any. */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1535 | free_unused_bufs(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1536 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1537 | free_receive_bufs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 1538 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1539 | virtnet_del_vqs(vi); |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1540 | } |
| 1541 | |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 1542 | static void virtnet_remove(struct virtio_device *vdev) |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1543 | { |
| 1544 | struct virtnet_info *vi = vdev->priv; |
| 1545 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1546 | /* Prevent config work handler from accessing the device. */ |
| 1547 | mutex_lock(&vi->config_lock); |
| 1548 | vi->config_enable = false; |
| 1549 | mutex_unlock(&vi->config_lock); |
| 1550 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 1551 | unregister_netdev(vi->dev); |
| 1552 | |
| 1553 | remove_vq_common(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 1554 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1555 | flush_work(&vi->config_work); |
| 1556 | |
Krishna Kumar | 2e66f55 | 2011-07-20 03:56:02 +0000 | [diff] [blame] | 1557 | free_percpu(vi->stats); |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 1558 | free_netdev(vi->dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1559 | } |
| 1560 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1561 | #ifdef CONFIG_PM |
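| | /* Quiesce for suspend: block the config handler, detach the interface, |
| | * stop NAPI, and tear down the virtqueues (the device is reset, so the |
| | * rings cannot survive the sleep). |
| | */ |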
| 1562 | static int virtnet_freeze(struct virtio_device *vdev) |
| 1563 | { |
| 1564 | struct virtnet_info *vi = vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1565 | int i; |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1566 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1567 | /* Prevent config work handler from accessing the device */ |
| 1568 | mutex_lock(&vi->config_lock); |
| 1569 | vi->config_enable = false; |
| 1570 | mutex_unlock(&vi->config_lock); |
| 1571 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1572 | netif_device_detach(vi->dev); |
| 1573 | cancel_delayed_work_sync(&vi->refill); |
| 1574 | |
| 1575 | if (netif_running(vi->dev)) { |
| 1576 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1577 | napi_disable(&vi->rq[i].napi); |
| 1578 | netif_napi_del(&vi->rq[i].napi); |
| 1579 | } |
| | } |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1580 | |
| 1581 | remove_vq_common(vi); |
| 1582 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1583 | flush_work(&vi->config_work); |
| 1584 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1585 | return 0; |
| 1586 | } |
| 1587 | |
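| | /* Rebuild the virtqueues on resume, restart NAPI, refill the receive |
| | * rings, and re-request the number of queue pairs in use before the |
| | * freeze. |
| | */ |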
| 1588 | static int virtnet_restore(struct virtio_device *vdev) |
| 1589 | { |
| 1590 | struct virtnet_info *vi = vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1591 | int err, i; |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1592 | |
| 1593 | err = init_vqs(vi); |
| 1594 | if (err) |
| 1595 | return err; |
| 1596 | |
| 1597 | if (netif_running(vi->dev)) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1598 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1599 | virtnet_napi_enable(&vi->rq[i]); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1600 | |
| 1601 | netif_device_attach(vi->dev); |
| 1602 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1603 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1604 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) |
| 1605 | schedule_delayed_work(&vi->refill, 0); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1606 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1607 | mutex_lock(&vi->config_lock); |
| 1608 | vi->config_enable = true; |
| 1609 | mutex_unlock(&vi->config_lock); |
| 1610 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1611 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
| 1612 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1613 | return 0; |
| 1614 | } |
| 1615 | #endif |
| 1616 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1617 | static struct virtio_device_id id_table[] = { |
| 1618 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 1619 | { 0 }, |
| 1620 | }; |
| 1621 | |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1622 | static unsigned int features[] = { |
Mark McLoughlin | 5e4fe5c | 2008-07-08 17:10:42 +1000 | [diff] [blame] | 1623 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
| 1624 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1625 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 1626 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
Sridhar Samudrala | 5c51675 | 2009-07-14 14:21:02 +0000 | [diff] [blame] | 1627 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
Alex Williamson | 2a41f71 | 2009-02-04 09:02:34 +0000 | [diff] [blame] | 1628 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1629 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1630 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1631 | }; |
| 1632 | |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1633 | static struct virtio_driver virtio_net_driver = { |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 1634 | .feature_table = features, |
| 1635 | .feature_table_size = ARRAY_SIZE(features), |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1636 | .driver.name = KBUILD_MODNAME, |
| 1637 | .driver.owner = THIS_MODULE, |
| 1638 | .id_table = id_table, |
| 1639 | .probe = virtnet_probe, |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 1640 | .remove = virtnet_remove, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1641 | .config_changed = virtnet_config_changed, |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 1642 | #ifdef CONFIG_PM |
| 1643 | .freeze = virtnet_freeze, |
| 1644 | .restore = virtnet_restore, |
| 1645 | #endif |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1646 | }; |
| 1647 | |
| 1648 | static int __init init(void) |
| 1649 | { |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1650 | return register_virtio_driver(&virtio_net_driver); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1651 | } |
| 1652 | |
| 1653 | static void __exit fini(void) |
| 1654 | { |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 1655 | unregister_virtio_driver(&virtio_net_driver); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1656 | } |
| 1657 | module_init(init); |
| 1658 | module_exit(fini); |
| 1659 | |
| 1660 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 1661 | MODULE_DESCRIPTION("Virtio network driver"); |
| 1662 | MODULE_LICENSE("GPL"); |