/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <net/busy_poll.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 1, 64)

/* With mergeable buffers we align buffer address and use the low bits to
 * encode its true size. Buffer size is up to 1 page so we need to align to
 * square root of page size to ensure we reserve enough bits to encode the true
 * size.
 */
#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)

/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* Control VQ buffers: protected by the rtnl lock */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
	struct virtio_net_ctrl_mq ctrl_mq;
	u8 ctrl_promisc;
	u8 ctrl_allmulti;
	u16 ctrl_vid;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

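/* The virtio-net header of an skb in flight is stashed in skb->cb, which is
 * large enough to hold a struct virtio_net_hdr_mrg_rxbuf.
 */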
static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

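/* Virtqueue callback for TX completions: the host has consumed buffers from
 * the send queue, so the corresponding netdev TX queue can be woken.
 */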
static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

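/* Helpers for the mergeable-buffer context described above: the buffer
 * address lives in the high bits of the unsigned long, and
 * (truesize / MERGEABLE_BUFFER_ALIGN - 1) is encoded in the low bits.
 */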
static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
{
	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);

	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
}

static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
{
	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;

	return (unsigned long)buf | (size - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof *hdr;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

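/* Transmit an XDP_TX frame: reclaim completed buffers from the send queue,
 * then queue the frame (with a zeroed virtio-net header) and kick the host.
 */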
static void virtnet_xdp_xmit(struct virtnet_info *vi,
			     struct receive_queue *rq,
			     struct send_queue *sq,
			     struct xdp_buff *xdp,
			     void *data)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int num_sg, len;
	void *xdp_sent;
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (vi->mergeable_rx_bufs) {
			struct page *sent_page = virt_to_head_page(xdp_sent);

			put_page(sent_page);
		} else { /* small buffer */
			struct sk_buff *skb = xdp_sent;

			kfree_skb(skb);
		}
	}

	if (vi->mergeable_rx_bufs) {
		/* Zero header and leave csum up to XDP layers */
		hdr = xdp->data;
		memset(hdr, 0, vi->hdr_len);

		num_sg = 1;
		sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
	} else { /* small buffer */
		struct sk_buff *skb = data;

		/* Zero header and leave csum up to XDP layers */
		hdr = skb_vnet_hdr(skb);
		memset(hdr, 0, vi->hdr_len);

		num_sg = 2;
		sg_init_table(sq->sg, 2);
		sg_set_buf(sq->sg, hdr, vi->hdr_len);
		skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
	}
	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
				   data, GFP_ATOMIC);
	if (unlikely(err)) {
		if (vi->mergeable_rx_bufs) {
			struct page *page = virt_to_head_page(xdp->data);

			put_page(page);
		} else /* small buffer */
			kfree_skb(data);
		/* On error abort to avoid unnecessary kick */
		return;
	}

	virtqueue_kick(sq->vq);
}

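/* Run the attached XDP program on one received frame and map its verdict to
 * XDP_PASS, XDP_TX (transmitted here, on this CPU's dedicated XDP send
 * queue) or XDP_DROP.
 */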
static u32 do_xdp_prog(struct virtnet_info *vi,
		       struct receive_queue *rq,
		       struct bpf_prog *xdp_prog,
		       void *data, int len)
{
	int hdr_padded_len;
	struct xdp_buff xdp;
	void *buf;
	unsigned int qp;
	u32 act;

	if (vi->mergeable_rx_bufs) {
		hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		xdp.data = data + hdr_padded_len;
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		buf = data;
	} else { /* small buffers */
		struct sk_buff *skb = data;

		xdp.data = skb->data;
		xdp.data_end = xdp.data + len;
		buf = skb->data;
	}

	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	switch (act) {
	case XDP_PASS:
		return XDP_PASS;
	case XDP_TX:
		qp = vi->curr_queue_pairs -
			vi->xdp_queue_pairs +
			smp_processor_id();
		xdp.data = buf;
		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
		return XDP_TX;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
	case XDP_DROP:
		return XDP_DROP;
	}
}

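/* Receive path for small (linear skb) buffers; runs the XDP program first if
 * one is attached.
 */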
static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, unsigned int len)
{
	struct sk_buff *skb = buf;
	struct bpf_prog *xdp_prog;

	len -= vi->hdr_len;
	skb_trim(skb, len);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
		u32 act;

		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
			goto err_xdp;
		act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_TX:
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_DROP:
		default:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	kfree_skb(skb);
xdp_xmit:
	return NULL;
}

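/* Receive path for big packets: the buffer is a chain of pages, turned back
 * into an skb by page_to_skb().
 */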
static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

/* The conditions to enable XDP should preclude the underlying device from
 * sending packets across multiple buffers (num_buf > 1). However, per spec
 * it is not illegal to do so, merely against convention. So, to avoid making
 * the system unresponsive, such packets are linearized into a page and the
 * XDP program is run on that. This is extremely slow, so we warn the user to
 * fix it as soon as possible. Fixing it may require investigating the
 * underlying hardware to determine why multiple buffers are being received,
 * or simply loading the XDP program in the ingress stack after the skb is
 * built, because there is no advantage to running it here anymore.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	unsigned int page_off = 0;

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		unsigned long ctx;
		void *buf;
		int off;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!ctx))
			goto err_buf;

		buf = mergeable_ctx_to_buf_address(ctx);
		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	*len = page_off;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

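/* Receive path for mergeable buffers: a packet may span several receive
 * buffers (num_buf of them), which are stitched into one skb; the XDP
 * program, if attached, runs first, on a linearized copy when the packet
 * spans more than one buffer.
 */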
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 unsigned long ctx,
					 unsigned int len)
{
	void *buf = mergeable_ctx_to_buf_address(ctx);
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct page *xdp_page;
		u32 act;

		/* This happens when rx buffer size is underestimated */
		if (unlikely(num_buf > 1)) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset, &len);
			if (!xdp_page)
				goto err_xdp;
			offset = 0;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		act = do_xdp_prog(vi, rq, xdp_prog,
				  page_address(xdp_page) + offset, len);
		switch (act) {
		case XDP_PASS:
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       0, len, PAGE_SIZE);
				ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
				return head_skb;
			}
			break;
		case XDP_TX:
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_DROP:
		default:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		buf = mergeable_ctx_to_buf_address(ctx);
		page = virt_to_head_page(buf);

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!ctx)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

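/* Process one completed receive buffer: build an skb via the path matching
 * the negotiated buffer layout, validate the virtio-net header, update
 * statistics and hand the skb to the stack.
 */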
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len)
{
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			unsigned long ctx = (unsigned long)buf;
			void *base = mergeable_ctx_to_buf_address(ctx);

			put_page(virt_to_head_page(base));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			dev_kfree_skb(buf);
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, len);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

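/* Post one small receive buffer: a GOOD_PACKET_LEN skb, with the virtio-net
 * header in a separate sg entry.
 */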
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, GOOD_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_init_table(rq->sg, 2);
	sg_set_buf(rq->sg, hdr, vi->hdr_len);
	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

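/* Post one big receive buffer: MAX_SKB_FRAGS + 2 sg entries backed by pages
 * linked through page->private, with the header carved out of the first
 * page.
 */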
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

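/* Pick a length for the next mergeable receive buffer: the EWMA of recent
 * packet sizes, clamped to [GOOD_PACKET_LEN, PAGE_SIZE - hdr_len] and
 * rounded up to MERGEABLE_BUFFER_ALIGN so the truesize fits in the low bits
 * of the buffer context.
 */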
static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}

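/* Post one mergeable receive buffer, carved from the per-queue page frag and
 * tagged with its truesize via mergeable_buf_to_ctx().
 */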
| 864 | static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) |
| 865 | { |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 866 | struct page_frag *alloc_frag = &rq->alloc_frag; |
| 867 | char *buf; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 868 | unsigned long ctx; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 869 | int err; |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 870 | unsigned int len, hole; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 871 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 872 | len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 873 | if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 874 | return -ENOMEM; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 875 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 876 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 877 | ctx = mergeable_buf_to_ctx(buf, len); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 878 | get_page(alloc_frag->page); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 879 | alloc_frag->offset += len; |
| 880 | hole = alloc_frag->size - alloc_frag->offset; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 881 | if (hole < len) { |
| 882 | /* To avoid internal fragmentation, if there is very likely not |
| 883 | * enough space for another buffer, add the remaining space to |
| 884 | * the current buffer. This extra space is not included in |
| 885 | * the truesize stored in ctx. |
| 886 | */ |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 887 | len += hole; |
| 888 | alloc_frag->offset += hole; |
| 889 | } |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 890 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 891 | sg_init_one(rq->sg, buf, len); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 892 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 893 | if (err < 0) |
Michael Dalton | 2613af0 | 2013-10-28 15:44:18 -0700 | [diff] [blame] | 894 | put_page(virt_to_head_page(buf)); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 895 | |
| 896 | return err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 897 | } |
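| |
| | /* The "ctx" queued above is not an skb pointer: mergeable_buf_to_ctx()
| |  * (defined earlier in this file) folds the buffer's truesize into the
| |  * low bits of the buffer address, which the MERGEABLE_BUFFER_ALIGN
| |  * alignment leaves free. Roughly:
| |  *
| |  *	ctx = (unsigned long)buf | (truesize / MERGEABLE_BUFFER_ALIGN - 1);
| |  *
| |  * so the receive path can recover both the address and the truesize
| |  * from the single token returned by virtqueue_get_buf().
| |  */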
| 898 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 899 | /* |
| 900 | * Returns false if we couldn't fill entirely (OOM). |
| 901 | * |
| 902 | * Normally run in the receive path, but can also be run from ndo_open |
| 903 | * before we're receiving packets, or from refill_work which is |
| 904 | * careful to disable receiving (using napi_disable). |
| 905 | */ |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 906 | static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, |
| 907 | gfp_t gfp) |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 908 | { |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 909 | int err; |
Michael S. Tsirkin | 1788f495 | 2010-07-02 16:32:55 +0000 | [diff] [blame] | 910 | bool oom; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 911 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 912 | gfp |= __GFP_COLD; |
Amit Shah | 0aea51c | 2009-08-26 14:58:28 +0530 | [diff] [blame] | 913 | do { |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 914 | if (vi->mergeable_rx_bufs) |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 915 | err = add_recvbuf_mergeable(rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 916 | else if (vi->big_packets) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 917 | err = add_recvbuf_big(vi, rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 918 | else |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 919 | err = add_recvbuf_small(vi, rq, gfp); |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 920 | |
Michael S. Tsirkin | 1788f495 | 2010-07-02 16:32:55 +0000 | [diff] [blame] | 921 | oom = err == -ENOMEM; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 922 | if (err) |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 923 | break; |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 924 | } while (rq->vq->num_free); |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 925 | virtqueue_kick(rq->vq); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 926 | return !oom; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 927 | } |
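| |
| | /* Callers treat the return value as "not OOM"; the idiom (see
| |  * virtnet_receive() and virtnet_open() below) is:
| |  *
| |  *	if (!try_fill_recv(vi, rq, GFP_ATOMIC))
| |  *		schedule_delayed_work(&vi->refill, 0);
| |  *
| |  * i.e. on allocation failure the refill is retried from refill_work(),
| |  * where GFP_KERNEL allocations can sleep and reclaim can make progress.
| |  * Note also the single virtqueue_kick() after the loop: buffers are
| |  * exposed in a batch and the device is notified once.
| |  */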
| 928 | |
Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 929 | static void skb_recv_done(struct virtqueue *rvq) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 930 | { |
| 931 | struct virtnet_info *vi = rvq->vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 932 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 933 | |
Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 934 | /* Schedule NAPI, suppress further interrupts if successful. */

Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 935 | if (napi_schedule_prep(&rq->napi)) { |
Michael S. Tsirkin | 1915a712 | 2010-04-12 16:19:04 +0300 | [diff] [blame] | 936 | virtqueue_disable_cb(rvq); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 937 | __napi_schedule(&rq->napi); |
Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 938 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 939 | } |
| 940 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 941 | static void virtnet_napi_enable(struct receive_queue *rq) |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 942 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 943 | napi_enable(&rq->napi); |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 944 | |
| 945 | /* If all buffers were filled by the other side before we enabled NAPI,
| 946 | * we won't get another interrupt, so process any outstanding packets
| 947 | * now. virtnet_poll() wants to re-enable the queue, so we disable it
| 948 | * here. We synchronize against interrupts via NAPI_STATE_SCHED. */
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 949 | if (napi_schedule_prep(&rq->napi)) { |
| 950 | virtqueue_disable_cb(rq->vq); |
Michael S. Tsirkin | ec13ee8 | 2012-05-16 10:57:12 +0300 | [diff] [blame] | 951 | local_bh_disable(); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 952 | __napi_schedule(&rq->napi); |
Michael S. Tsirkin | ec13ee8 | 2012-05-16 10:57:12 +0300 | [diff] [blame] | 953 | local_bh_enable(); |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 954 | } |
| 955 | } |
| 956 | |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 957 | static void refill_work(struct work_struct *work) |
| 958 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 959 | struct virtnet_info *vi = |
| 960 | container_of(work, struct virtnet_info, refill.work); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 961 | bool still_empty; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 962 | int i; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 963 | |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 964 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 965 | struct receive_queue *rq = &vi->rq[i]; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 966 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 967 | napi_disable(&rq->napi); |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 968 | still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 969 | virtnet_napi_enable(rq); |
| 970 | |
| 971 | /* In theory, this can happen: if we don't get any buffers in,
| 972 | * we will *never* try to fill again.
| 973 | */
| 974 | if (still_empty) |
| 975 | schedule_delayed_work(&vi->refill, HZ/2); |
| 976 | } |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 977 | } |
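| |
| | /* If memory stays tight, refill_work() keeps requeueing itself every
| |  * HZ/2 (half a second) until at least one buffer can be posted again,
| |  * so a transient OOM never permanently starves the RX ring.
| |  */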
| 978 | |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 979 | static int virtnet_receive(struct receive_queue *rq, int budget) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 980 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 981 | struct virtnet_info *vi = rq->vq->vdev->priv; |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 982 | unsigned int len, received = 0; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 983 | void *buf; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 984 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 985 | while (received < budget && |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 986 | (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 987 | receive_buf(vi, rq, buf, len); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 988 | received++; |
| 989 | } |
| 990 | |
Jason Wang | be121f4 | 2014-01-16 14:45:24 +0800 | [diff] [blame] | 991 | if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 992 | if (!try_fill_recv(vi, rq, GFP_ATOMIC)) |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 993 | schedule_delayed_work(&vi->refill, 0); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 994 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 995 | |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 996 | return received; |
| 997 | } |
| 998 | |
| 999 | static int virtnet_poll(struct napi_struct *napi, int budget) |
| 1000 | { |
| 1001 | struct receive_queue *rq = |
| 1002 | container_of(napi, struct receive_queue, napi); |
Li RongQing | faadb05 | 2015-03-26 15:39:45 +0800 | [diff] [blame] | 1003 | unsigned int r, received; |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1004 | |
Li RongQing | faadb05 | 2015-03-26 15:39:45 +0800 | [diff] [blame] | 1005 | received = virtnet_receive(rq, budget); |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1006 | |
Rusty Russell | 8329d98 | 2007-11-19 11:20:43 -0500 | [diff] [blame] | 1007 | /* Out of packets? */ |
| 1008 | if (received < budget) { |
Michael S. Tsirkin | cbdadbb | 2013-07-09 08:13:04 +0300 | [diff] [blame] | 1009 | r = virtqueue_enable_cb_prepare(rq->vq); |
Eric Dumazet | 0fbd050 | 2015-07-31 18:25:17 +0200 | [diff] [blame] | 1010 | napi_complete_done(napi, received); |
Michael S. Tsirkin | cbdadbb | 2013-07-09 08:13:04 +0300 | [diff] [blame] | 1011 | if (unlikely(virtqueue_poll(rq->vq, r)) && |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 1012 | napi_schedule_prep(napi)) { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1013 | virtqueue_disable_cb(rq->vq); |
Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 1014 | __napi_schedule(napi); |
Christian Borntraeger | 4265f16 | 2008-03-14 14:17:05 +0100 | [diff] [blame] | 1015 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1016 | } |
| 1017 | |
| 1018 | return received; |
| 1019 | } |
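| |
| | /* The enable/poll pair above closes a race: a buffer may complete
| |  * between the last virtqueue_get_buf() and re-enabling callbacks.
| |  * virtqueue_enable_cb_prepare() returns an opaque snapshot of the ring
| |  * index and virtqueue_poll() reports whether anything has arrived since;
| |  * if so, NAPI is rescheduled rather than waiting for an interrupt that
| |  * was already suppressed.
| |  */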
| 1020 | |
Jason Wang | 9181563 | 2014-07-23 16:33:55 +0800 | [diff] [blame] | 1021 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 1022 | /* must be called with local_bh_disable()d */ |
| 1023 | static int virtnet_busy_poll(struct napi_struct *napi) |
| 1024 | { |
| 1025 | struct receive_queue *rq = |
| 1026 | container_of(napi, struct receive_queue, napi); |
| 1027 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1028 | int r, received = 0, budget = 4; |
| 1029 | |
| 1030 | if (!(vi->status & VIRTIO_NET_S_LINK_UP)) |
| 1031 | return LL_FLUSH_FAILED; |
| 1032 | |
| 1033 | if (!napi_schedule_prep(napi)) |
| 1034 | return LL_FLUSH_BUSY; |
| 1035 | |
| 1036 | virtqueue_disable_cb(rq->vq); |
| 1037 | |
| 1038 | again: |
| 1039 | received += virtnet_receive(rq, budget); |
| 1040 | |
| 1041 | r = virtqueue_enable_cb_prepare(rq->vq); |
| 1042 | clear_bit(NAPI_STATE_SCHED, &napi->state); |
| 1043 | if (unlikely(virtqueue_poll(rq->vq, r)) && |
| 1044 | napi_schedule_prep(napi)) { |
| 1045 | virtqueue_disable_cb(rq->vq); |
| 1046 | if (received < budget) { |
| 1047 | budget -= received; |
| 1048 | goto again; |
| 1049 | } else { |
| 1050 | __napi_schedule(napi); |
| 1051 | } |
| 1052 | } |
| 1053 | |
| 1054 | return received; |
| 1055 | } |
| 1056 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
| 1057 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1058 | static int virtnet_open(struct net_device *dev) |
| 1059 | { |
| 1060 | struct virtnet_info *vi = netdev_priv(dev); |
| 1061 | int i; |
| 1062 | |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 1063 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1064 | if (i < vi->curr_queue_pairs) |
| 1065 | /* Make sure we have some buffers: if OOM, use the workqueue. */
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1066 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 1067 | schedule_delayed_work(&vi->refill, 0); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1068 | virtnet_napi_enable(&vi->rq[i]); |
| 1069 | } |
| 1070 | |
| 1071 | return 0; |
| 1072 | } |
| 1073 | |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1074 | static void free_old_xmit_skbs(struct send_queue *sq) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1075 | { |
| 1076 | struct sk_buff *skb; |
Michael S. Tsirkin | 6ee57bc | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1077 | unsigned int len; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1078 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Eric Dumazet | 58472a7 | 2012-02-13 06:53:41 +0000 | [diff] [blame] | 1079 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1080 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1081 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1082 | pr_debug("Sent skb %p\n", skb); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1083 | |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 1084 | u64_stats_update_begin(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1085 | stats->tx_bytes += skb->len; |
| 1086 | stats->tx_packets++; |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 1087 | u64_stats_update_end(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1088 | |
Eric Dumazet | ed79bab | 2009-10-14 14:36:43 +0000 | [diff] [blame] | 1089 | dev_kfree_skb_any(skb); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1090 | } |
| 1091 | } |
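| |
| | /* The u64_stats_update_begin()/end() pair around the counters exists for
| |  * 32-bit SMP, where a 64-bit counter cannot be read atomically; readers
| |  * in virtnet_stats() retry via u64_stats_fetch_retry_irq() until they
| |  * see a consistent snapshot. On 64-bit it essentially compiles away.
| |  */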
| 1092 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1093 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1094 | { |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1095 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1096 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1097 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Michael S. Tsirkin | 7bedc7d | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1098 | unsigned num_sg; |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1099 | unsigned hdr_len = vi->hdr_len; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1100 | bool can_push; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1101 | |
Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 1102 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1103 | |
| 1104 | can_push = vi->any_header_sg && |
| 1105 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && |
| 1106 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; |
| 1107 | /* Even if we can, don't push here yet as this would skew
| 1108 | * the csum_start offset below. */
| 1109 | if (can_push) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1110 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1111 | else |
| 1112 | hdr = skb_vnet_hdr(skb); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1113 | |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1114 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
Jason Wang | 6391a44 | 2017-01-20 14:32:42 +0800 | [diff] [blame] | 1115 | virtio_is_little_endian(vi->vdev), false)) |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1116 | BUG(); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1117 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1118 | if (vi->mergeable_rx_bufs) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1119 | hdr->num_buffers = 0; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1120 | |
Jason Wang | 547c890 | 2015-08-27 14:53:06 +0800 | [diff] [blame] | 1121 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1122 | if (can_push) { |
| 1123 | __skb_push(skb, hdr_len); |
| 1124 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
| 1125 | /* Pull header back to avoid skew in tx bytes calculations. */ |
| 1126 | __skb_pull(skb, hdr_len); |
| 1127 | } else { |
| 1128 | sg_set_buf(sq->sg, hdr, hdr_len); |
| 1129 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; |
| 1130 | } |
Rusty Russell | 9dc7b9e | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1131 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
Rusty Russell | 11a3a15 | 2008-05-26 17:48:13 +1000 | [diff] [blame] | 1132 | } |
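| |
| | /* Resulting scatterlist layouts, as a rough sketch:
| |  *
| |  *	can_push:   [ hdr | linear data ][ frag 0 ] ... [ frag N ]
| |  *	            (header pushed into skb headroom; one sg entry covers
| |  *	             header plus linear data)
| |  *	otherwise:  [ hdr ][ linear data ][ frag 0 ] ... [ frag N ]
| |  *	            (header in its own sg entry, hence the "+ 2" passed to
| |  *	             sg_init_table() above)
| |  */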
| 1133 | |
Stephen Hemminger | 424efe9 | 2009-08-31 19:50:51 +0000 | [diff] [blame] | 1134 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1135 | { |
| 1136 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1137 | int qnum = skb_get_queue_mapping(skb); |
| 1138 | struct send_queue *sq = &vi->sq[qnum]; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1139 | int err; |
Michael S. Tsirkin | 4b7fd2e6 | 2014-10-15 16:23:28 +0300 | [diff] [blame] | 1140 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
| 1141 | bool kick = !skb->xmit_more; |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1142 | |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1143 | /* Free up any pending old buffers before queueing new ones. */ |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1144 | free_old_xmit_skbs(sq); |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1145 | |
Jacob Keller | 074c358 | 2014-06-25 02:37:13 +0000 | [diff] [blame] | 1146 | /* timestamp packet in software */ |
| 1147 | skb_tx_timestamp(skb); |
| 1148 | |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 1149 | /* Try to transmit */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1150 | err = xmit_skb(sq, skb); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1151 | |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1152 | /* This should not happen! */ |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 1153 | if (unlikely(err)) { |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1154 | dev->stats.tx_fifo_errors++; |
| 1155 | if (net_ratelimit()) |
| 1156 | dev_warn(&dev->dev, |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1157 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 1158 | dev->stats.tx_dropped++; |
Eric W. Biederman | 85e9452 | 2014-03-15 18:43:33 -0700 | [diff] [blame] | 1159 | dev_kfree_skb_any(skb); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 1160 | return NETDEV_TX_OK; |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1161 | } |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 1162 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1163 | /* Don't wait up for transmitted skbs to be freed. */ |
| 1164 | skb_orphan(skb); |
| 1165 | nf_reset(skb); |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1166 | |
Michael S. Tsirkin | 60302ff | 2015-04-02 13:05:47 +0200 | [diff] [blame] | 1167 | /* If running out of space, stop queue to avoid getting packets that we |
| 1168 | * are then unable to transmit. |
| 1169 | * An alternative would be to force queuing layer to requeue the skb by |
| 1170 | * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be |
| 1171 | * returned in a normal path of operation: it means that driver is not |
| 1172 | * maintaining the TX queue stop/start state properly, and causes |
| 1173 | * the stack to do a non-trivial amount of useless work. |
| 1174 | * Since most packets only take 1 or 2 ring slots, stopping the queue |
| 1175 | * early means 16 slots are typically wasted. |
stephen hemminger | d631b94 | 2015-03-24 16:22:07 -0700 | [diff] [blame] | 1176 | */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1177 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1178 | netif_stop_subqueue(dev, qnum); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1179 | if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1180 | /* More just got used, free them then recheck. */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1181 | free_old_xmit_skbs(sq); |
| 1182 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1183 | netif_start_subqueue(dev, qnum); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1184 | virtqueue_disable_cb(sq->vq); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1185 | } |
| 1186 | } |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1187 | } |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1188 | |
Michael S. Tsirkin | 4b7fd2e6 | 2014-10-15 16:23:28 +0300 | [diff] [blame] | 1189 | if (kick || netif_xmit_stopped(txq)) |
David S. Miller | 0b725a2 | 2014-08-25 15:51:53 -0700 | [diff] [blame] | 1190 | virtqueue_kick(sq->vq); |
| 1191 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1192 | return NETDEV_TX_OK; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1193 | } |
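| |
| | /* Illustrative numbers for the stop/start logic above: with the common
| |  * MAX_SKB_FRAGS of 17 (4K pages), the queue is stopped once fewer than
| |  * 2 + 17 = 19 descriptors remain, i.e. as soon as a worst-case skb
| |  * (header descriptor plus a fully fragmented payload) might not fit.
| |  */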
| 1194 | |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1195 | /* |
| 1196 | * Send command via the control virtqueue and check status. Commands |
| 1197 | * supported by the hypervisor, as indicated by feature bits, should |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1198 | * never fail unless improperly formatted. |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1199 | */ |
| 1200 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1201 | struct scatterlist *out) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1202 | { |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1203 | struct scatterlist *sgs[4], hdr, stat; |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1204 | unsigned out_num = 0, tmp; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1205 | |
| 1206 | /* Caller should know better */ |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1207 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1208 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1209 | vi->ctrl_status = ~0; |
| 1210 | vi->ctrl_hdr.class = class; |
| 1211 | vi->ctrl_hdr.cmd = cmd; |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1212 | /* Add header */ |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1213 | sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1214 | sgs[out_num++] = &hdr; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1215 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1216 | if (out) |
| 1217 | sgs[out_num++] = out; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1218 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1219 | /* Add return status. */ |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1220 | sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1221 | sgs[out_num] = &stat; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1222 | |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1223 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
Rusty Russell | a7c5814 | 2014-03-13 11:23:39 +1030 | [diff] [blame] | 1224 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1225 | |
Heinz Graalfs | 6797590 | 2013-10-29 09:40:02 +1030 | [diff] [blame] | 1226 | if (unlikely(!virtqueue_kick(vi->cvq))) |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1227 | return vi->ctrl_status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1228 | |
| 1229 | /* Spin for a response; the kick causes an ioport write, trapping
| 1230 | * into the hypervisor, so the request should be handled immediately.
| 1231 | */
Heinz Graalfs | 047b9b9 | 2013-10-29 09:40:47 +1030 | [diff] [blame] | 1232 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
| 1233 | !virtqueue_is_broken(vi->cvq)) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1234 | cpu_relax(); |
| 1235 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1236 | return vi->ctrl_status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1237 | } |
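| |
| | /* Typical call pattern (virtnet_set_queues() below is the real thing):
| |  *
| |  *	struct scatterlist sg;
| |  *
| |  *	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
| |  *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
| |  *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
| |  *		dev_warn(...);
| |  *
| |  * The out scatterlist is optional: commands that carry no argument,
| |  * e.g. VIRTIO_NET_CTRL_ANNOUNCE_ACK, pass NULL.
| |  */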
| 1238 | |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1239 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
| 1240 | { |
| 1241 | struct virtnet_info *vi = netdev_priv(dev); |
| 1242 | struct virtio_device *vdev = vi->vdev; |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1243 | int ret; |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1244 | struct sockaddr *addr; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1245 | struct scatterlist sg; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1246 | |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1247 | addr = kmalloc(sizeof(*addr), GFP_KERNEL); |
| 1248 | if (!addr) |
| 1249 | return -ENOMEM; |
| 1250 | memcpy(addr, p, sizeof(*addr)); |
| 1251 | |
| 1252 | ret = eth_prepare_mac_addr_change(dev, addr); |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1253 | if (ret) |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1254 | goto out; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1255 | |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1256 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
| 1257 | sg_init_one(&sg, addr->sa_data, dev->addr_len); |
| 1258 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1259 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1260 | dev_warn(&vdev->dev, |
| 1261 | "Failed to set mac address by vq command.\n"); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1262 | ret = -EINVAL; |
| 1263 | goto out; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1264 | } |
Michael S. Tsirkin | 7e93a02 | 2014-11-26 15:58:28 +0200 | [diff] [blame] | 1265 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
| 1266 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1267 | unsigned int i; |
| 1268 | |
| 1269 | /* Naturally, this has an atomicity problem. */ |
| 1270 | for (i = 0; i < dev->addr_len; i++) |
| 1271 | virtio_cwrite8(vdev, |
| 1272 | offsetof(struct virtio_net_config, mac) + |
| 1273 | i, addr->sa_data[i]); |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1274 | } |
| 1275 | |
| 1276 | eth_commit_mac_addr_change(dev, p); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1277 | ret = 0; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1278 | |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1279 | out: |
| 1280 | kfree(addr); |
| 1281 | return ret; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1282 | } |
| 1283 | |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1284 | static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, |
| 1285 | struct rtnl_link_stats64 *tot) |
| 1286 | { |
| 1287 | struct virtnet_info *vi = netdev_priv(dev); |
| 1288 | int cpu; |
| 1289 | unsigned int start; |
| 1290 | |
| 1291 | for_each_possible_cpu(cpu) { |
Eric Dumazet | 58472a7 | 2012-02-13 06:53:41 +0000 | [diff] [blame] | 1292 | struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1293 | u64 tpackets, tbytes, rpackets, rbytes; |
| 1294 | |
| 1295 | do { |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1296 | start = u64_stats_fetch_begin_irq(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1297 | tpackets = stats->tx_packets; |
| 1298 | tbytes = stats->tx_bytes; |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1299 | } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 1300 | |
| 1301 | do { |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1302 | start = u64_stats_fetch_begin_irq(&stats->rx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1303 | rpackets = stats->rx_packets; |
| 1304 | rbytes = stats->rx_bytes; |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1305 | } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1306 | |
| 1307 | tot->rx_packets += rpackets; |
| 1308 | tot->tx_packets += tpackets; |
| 1309 | tot->rx_bytes += rbytes; |
| 1310 | tot->tx_bytes += tbytes; |
| 1311 | } |
| 1312 | |
| 1313 | tot->tx_dropped = dev->stats.tx_dropped; |
Rick Jones | 021ac8d | 2011-11-21 09:28:17 +0000 | [diff] [blame] | 1314 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1315 | tot->rx_dropped = dev->stats.rx_dropped; |
| 1316 | tot->rx_length_errors = dev->stats.rx_length_errors; |
| 1317 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
| 1318 | |
| 1319 | return tot; |
| 1320 | } |
| 1321 | |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1322 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1323 | static void virtnet_netpoll(struct net_device *dev) |
| 1324 | { |
| 1325 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1326 | int i; |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1327 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1328 | for (i = 0; i < vi->curr_queue_pairs; i++) |
| 1329 | napi_schedule(&vi->rq[i].napi); |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1330 | } |
| 1331 | #endif |
| 1332 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1333 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 1334 | { |
| 1335 | rtnl_lock(); |
| 1336 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1337 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1338 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
| 1339 | rtnl_unlock(); |
| 1340 | } |
| 1341 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1342 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 1343 | { |
| 1344 | struct scatterlist sg; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1345 | struct net_device *dev = vi->dev; |
| 1346 | |
| 1347 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 1348 | return 0; |
| 1349 | |
Andy Lutomirski | a725ee3 | 2016-07-18 15:34:49 -0700 | [diff] [blame] | 1350 | vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
| 1351 | sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1352 | |
| 1353 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1354 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1355 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
| 1356 | queue_pairs); |
| 1357 | return -EINVAL; |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1358 | } else { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1359 | vi->curr_queue_pairs = queue_pairs; |
Jason Wang | 35ed159 | 2013-10-15 11:18:59 +0800 | [diff] [blame] | 1360 | /* virtnet_open() will refill when the device goes up. */
| 1361 | if (dev->flags & IFF_UP) |
| 1362 | schedule_delayed_work(&vi->refill, 0); |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1363 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1364 | |
| 1365 | return 0; |
| 1366 | } |
| 1367 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1368 | static int virtnet_close(struct net_device *dev) |
| 1369 | { |
| 1370 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1371 | int i; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1372 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 1373 | /* Make sure refill_work doesn't re-enable napi! */ |
| 1374 | cancel_delayed_work_sync(&vi->refill); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1375 | |
| 1376 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1377 | napi_disable(&vi->rq[i].napi); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1378 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1379 | return 0; |
| 1380 | } |
| 1381 | |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1382 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 1383 | { |
| 1384 | struct virtnet_info *vi = netdev_priv(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1385 | struct scatterlist sg[2]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1386 | struct virtio_net_ctrl_mac *mac_data; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1387 | struct netdev_hw_addr *ha; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1388 | int uc_count; |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1389 | int mc_count; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1390 | void *buf; |
| 1391 | int i; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1392 | |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1393 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1394 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 1395 | return; |
| 1396 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1397 | vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 1398 | vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1399 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1400 | sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1401 | |
| 1402 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1403 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1404 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1405 | vi->ctrl_promisc ? "en" : "dis"); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1406 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1407 | sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1408 | |
| 1409 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1410 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1411 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1412 | vi->ctrl_allmulti ? "en" : "dis"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1413 | |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1414 | uc_count = netdev_uc_count(dev); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1415 | mc_count = netdev_mc_count(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1416 | /* MAC filter - use one buffer for both lists */ |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1417 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 1418 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 1419 | mac_data = buf; |
Joe Perches | e68ed8f | 2013-02-03 17:28:15 +0000 | [diff] [blame] | 1420 | if (!buf) |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1421 | return; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1422 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1423 | sg_init_table(sg, 2); |
| 1424 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1425 | /* Store the unicast list and count in the front of the buffer */ |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 1426 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1427 | i = 0; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1428 | netdev_for_each_uc_addr(ha, dev) |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1429 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1430 | |
| 1431 | sg_set_buf(&sg[0], mac_data, |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1432 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1433 | |
| 1434 | /* multicast list and count fill the end */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1435 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1436 | |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 1437 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
Jiri Pirko | 567ec87 | 2010-02-23 23:17:07 +0000 | [diff] [blame] | 1438 | i = 0; |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1439 | netdev_for_each_mc_addr(ha, dev) |
| 1440 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1441 | |
| 1442 | sg_set_buf(&sg[1], mac_data, |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1443 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1444 | |
| 1445 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1446 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
Thomas Huth | 99e872a | 2013-11-29 10:02:19 +0100 | [diff] [blame] | 1447 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1448 | |
| 1449 | kfree(buf); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1450 | } |
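| |
| | /* Layout of the single allocation carrying both filter lists:
| |  *
| |  *	+---------+----------------+---------+----------------+
| |  *	| entries | uc_count MACs  | entries | mc_count MACs  |
| |  *	+---------+----------------+---------+----------------+
| |  *	 \---------- sg[0] --------/\---------- sg[1] --------/
| |  *
| |  * i.e. one virtio_net_ctrl_mac header per list, back to back, sent as
| |  * two scatterlist entries in a single MAC_TABLE_SET command.
| |  */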
| 1451 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1452 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
| 1453 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1454 | { |
| 1455 | struct virtnet_info *vi = netdev_priv(dev); |
| 1456 | struct scatterlist sg; |
| 1457 | |
Andy Lutomirski | a725ee3 | 2016-07-18 15:34:49 -0700 | [diff] [blame] | 1458 | vi->ctrl_vid = vid; |
| 1459 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1460 | |
| 1461 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1462 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1463 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1464 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1465 | } |
| 1466 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1467 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
| 1468 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1469 | { |
| 1470 | struct virtnet_info *vi = netdev_priv(dev); |
| 1471 | struct scatterlist sg; |
| 1472 | |
Andy Lutomirski | a725ee3 | 2016-07-18 15:34:49 -0700 | [diff] [blame] | 1473 | vi->ctrl_vid = vid; |
| 1474 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1475 | |
| 1476 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1477 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1478 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1479 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1480 | } |
| 1481 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1482 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1483 | { |
| 1484 | int i; |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1485 | |
| 1486 | if (vi->affinity_hint_set) { |
| 1487 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1488 | virtqueue_set_affinity(vi->rq[i].vq, -1); |
| 1489 | virtqueue_set_affinity(vi->sq[i].vq, -1); |
| 1490 | } |
| 1491 | |
| 1492 | vi->affinity_hint_set = false; |
| 1493 | } |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1494 | } |
| 1495 | |
| 1496 | static void virtnet_set_affinity(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1497 | { |
| 1498 | int i; |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1499 | int cpu; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1500 | |
| 1501 | /* In multiqueue mode, when the number of CPUs equals the number of
| 1502 | * queue pairs, we make each queue pair private to one CPU by setting
| 1503 | * the affinity hint, eliminating the contention.
| 1504 | */
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1505 | if (vi->curr_queue_pairs == 1 || |
| 1506 | vi->max_queue_pairs != num_online_cpus()) { |
| 1507 | virtnet_clean_affinity(vi, -1); |
| 1508 | return; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1509 | } |
| 1510 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1511 | i = 0; |
| 1512 | for_each_online_cpu(cpu) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1513 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
| 1514 | virtqueue_set_affinity(vi->sq[i].vq, cpu); |
Jason Wang | 9bb8ca8 | 2013-11-05 18:19:45 +0800 | [diff] [blame] | 1515 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1516 | i++; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1517 | } |
| 1518 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1519 | vi->affinity_hint_set = true; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1520 | } |
| 1521 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1522 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1523 | { |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1524 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1525 | node); |
| 1526 | virtnet_set_affinity(vi); |
| 1527 | return 0; |
| 1528 | } |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1529 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1530 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
| 1531 | { |
| 1532 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1533 | node_dead); |
| 1534 | virtnet_set_affinity(vi); |
| 1535 | return 0; |
| 1536 | } |
Jason Wang | 3ab098d | 2013-10-15 11:18:58 +0800 | [diff] [blame] | 1537 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1538 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
| 1539 | { |
| 1540 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1541 | node); |
| 1542 | |
| 1543 | virtnet_clean_affinity(vi, cpu); |
| 1544 | return 0; |
| 1545 | } |
| 1546 | |
| 1547 | static enum cpuhp_state virtionet_online; |
| 1548 | |
| 1549 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) |
| 1550 | { |
| 1551 | int ret; |
| 1552 | |
| 1553 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); |
| 1554 | if (ret) |
| 1555 | return ret; |
| 1556 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 1557 | &vi->node_dead); |
| 1558 | if (!ret) |
| 1559 | return ret; |
| 1560 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 1561 | return ret; |
| 1562 | } |
| 1563 | |
| 1564 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) |
| 1565 | { |
| 1566 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 1567 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 1568 | &vi->node_dead); |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1569 | } |
| 1570 | |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1571 | static void virtnet_get_ringparam(struct net_device *dev, |
| 1572 | struct ethtool_ringparam *ring) |
| 1573 | { |
| 1574 | struct virtnet_info *vi = netdev_priv(dev); |
| 1575 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1576 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
| 1577 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1578 | ring->rx_pending = ring->rx_max_pending; |
| 1579 | ring->tx_pending = ring->tx_max_pending; |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1580 | } |
| 1581 | |
| 1583 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 1584 | struct ethtool_drvinfo *info) |
| 1585 | { |
| 1586 | struct virtnet_info *vi = netdev_priv(dev); |
| 1587 | struct virtio_device *vdev = vi->vdev; |
| 1588 | |
| 1589 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 1590 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 1591 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 1593 | } |
| 1594 | |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1595 | /* TODO: Eliminate OOO packets during switching */ |
| 1596 | static int virtnet_set_channels(struct net_device *dev, |
| 1597 | struct ethtool_channels *channels) |
| 1598 | { |
| 1599 | struct virtnet_info *vi = netdev_priv(dev); |
| 1600 | u16 queue_pairs = channels->combined_count; |
| 1601 | int err; |
| 1602 | |
| 1603 | /* We don't support separate rx/tx channels. |
| 1604 | * We don't allow setting 'other' channels. |
| 1605 | */ |
| 1606 | if (channels->rx_count || channels->tx_count || channels->other_count) |
| 1607 | return -EINVAL; |
| 1608 | |
Amos Kong | c18e9cd | 2014-04-18 13:45:41 +0800 | [diff] [blame] | 1609 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1610 | return -EINVAL; |
| 1611 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1612 | /* For now we don't support modifying channels while XDP is loaded.
| 1613 | * Also, when XDP is loaded all RX queues have XDP programs, so we only
| 1614 | * need to check a single RX queue.
| 1615 | */
| 1616 | if (vi->rq[0].xdp_prog) |
| 1617 | return -EINVAL; |
| 1618 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1619 | get_online_cpus(); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1620 | err = virtnet_set_queues(vi, queue_pairs); |
| 1621 | if (!err) { |
| 1622 | netif_set_real_num_tx_queues(dev, queue_pairs); |
| 1623 | netif_set_real_num_rx_queues(dev, queue_pairs); |
| 1624 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1625 | virtnet_set_affinity(vi); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1626 | } |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1627 | put_online_cpus(); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1628 | |
| 1629 | return err; |
| 1630 | } |
| 1631 | |
| 1632 | static void virtnet_get_channels(struct net_device *dev, |
| 1633 | struct ethtool_channels *channels) |
| 1634 | { |
| 1635 | struct virtnet_info *vi = netdev_priv(dev); |
| 1636 | |
| 1637 | channels->combined_count = vi->curr_queue_pairs; |
| 1638 | channels->max_combined = vi->max_queue_pairs; |
| 1639 | channels->max_other = 0; |
| 1640 | channels->rx_count = 0; |
| 1641 | channels->tx_count = 0; |
| 1642 | channels->other_count = 0; |
| 1643 | } |
| 1644 | |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1645 | /* Check if the user is trying to change anything besides speed/duplex */ |
| 1646 | static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) |
| 1647 | { |
| 1648 | struct ethtool_cmd diff1 = *cmd; |
| 1649 | struct ethtool_cmd diff2 = {}; |
| 1650 | |
Nikolay Aleksandrov | 0cf3ace | 2016-02-07 21:52:24 +0100 | [diff] [blame] | 1651 | /* cmd is always set, so we need to clear it; we also validate the port
| 1652 | * type, and without autonegotiation we can ignore advertising.
| 1653 | */
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1654 | ethtool_cmd_speed_set(&diff1, 0); |
Nikolay Aleksandrov | 0cf3ace | 2016-02-07 21:52:24 +0100 | [diff] [blame] | 1655 | diff2.port = PORT_OTHER; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1656 | diff1.advertising = 0; |
| 1657 | diff1.duplex = 0; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1658 | diff1.cmd = 0; |
| 1659 | |
| 1660 | return !memcmp(&diff1, &diff2, sizeof(diff1)); |
| 1661 | } |
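| |
| | /* The comparison works by elimination: diff1 starts as a copy of the
| |  * user's cmd with every field the driver tolerates (speed, duplex,
| |  * advertising, cmd) zeroed, and diff2 is all zeroes except the one port
| |  * type we accept. Any remaining mismatch means the user tried to change
| |  * a setting this driver cannot honour.
| |  */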
| 1662 | |
| 1663 | static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 1664 | { |
| 1665 | struct virtnet_info *vi = netdev_priv(dev); |
| 1666 | u32 speed; |
| 1667 | |
| 1668 | speed = ethtool_cmd_speed(cmd); |
| 1669 | /* don't allow custom speed and duplex */ |
| 1670 | if (!ethtool_validate_speed(speed) || |
| 1671 | !ethtool_validate_duplex(cmd->duplex) || |
| 1672 | !virtnet_validate_ethtool_cmd(cmd)) |
| 1673 | return -EINVAL; |
| 1674 | vi->speed = speed; |
| 1675 | vi->duplex = cmd->duplex; |
| 1676 | |
| 1677 | return 0; |
| 1678 | } |
| 1679 | |
| 1680 | static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 1681 | { |
| 1682 | struct virtnet_info *vi = netdev_priv(dev); |
| 1683 | |
| 1684 | ethtool_cmd_speed_set(cmd, vi->speed); |
| 1685 | cmd->duplex = vi->duplex; |
| 1686 | cmd->port = PORT_OTHER; |
| 1687 | |
| 1688 | return 0; |
| 1689 | } |
| 1690 | |
| 1691 | static void virtnet_init_settings(struct net_device *dev) |
| 1692 | { |
| 1693 | struct virtnet_info *vi = netdev_priv(dev); |
| 1694 | |
| 1695 | vi->speed = SPEED_UNKNOWN; |
| 1696 | vi->duplex = DUPLEX_UNKNOWN; |
| 1697 | } |
| 1698 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 1699 | static const struct ethtool_ops virtnet_ethtool_ops = { |
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 1700 | .get_drvinfo = virtnet_get_drvinfo, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1701 | .get_link = ethtool_op_get_link, |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1702 | .get_ringparam = virtnet_get_ringparam, |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1703 | .set_channels = virtnet_set_channels, |
| 1704 | .get_channels = virtnet_get_channels, |
Jacob Keller | 074c358 | 2014-06-25 02:37:13 +0000 | [diff] [blame] | 1705 | .get_ts_info = ethtool_op_get_ts_info, |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1706 | .get_settings = virtnet_get_settings, |
| 1707 | .set_settings = virtnet_set_settings, |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1708 | }; |
| 1709 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1710 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) |
| 1711 | { |
| 1712 | unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); |
| 1713 | struct virtnet_info *vi = netdev_priv(dev); |
| 1714 | struct bpf_prog *old_prog; |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 1715 | u16 xdp_qp = 0, curr_qp; |
| 1716 | int i, err; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1717 | |
Jakub Kicinski | 529ec6a | 2017-01-25 14:56:36 -0800 | [diff] [blame] | 1718 | if (prog && prog->xdp_adjust_head) { |
| 1719 | netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n"); |
| 1720 | return -EOPNOTSUPP; |
| 1721 | } |
| 1722 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1723 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || |
Jason Wang | 92502fe | 2016-12-23 22:37:30 +0800 | [diff] [blame] | 1724 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 1725 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 1726 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1727 | netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n"); |
| 1728 | return -EOPNOTSUPP; |
| 1729 | } |
| 1730 | |
| 1731 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { |
| 1732 | netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n"); |
| 1733 | return -EINVAL; |
| 1734 | } |
| 1735 | |
| 1736 | if (dev->mtu > max_sz) { |
| 1737 | netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); |
| 1738 | return -EINVAL; |
| 1739 | } |
| 1740 | |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 1741 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
| 1742 | if (prog) |
| 1743 | xdp_qp = nr_cpu_ids; |
| 1744 | |
| 1745 | /* XDP requires extra queues for XDP_TX */ |
| 1746 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { |
| 1747 | netdev_warn(dev, "requested %i queues but max is %i\n",
| 1748 | curr_qp + xdp_qp, vi->max_queue_pairs); |
| 1749 | return -ENOMEM; |
| 1750 | } |
| 1751 | |
| 1752 | err = virtnet_set_queues(vi, curr_qp + xdp_qp); |
| 1753 | if (err) { |
| 1754 | dev_warn(&dev->dev, "XDP device queue allocation failure.\n");
| 1755 | return err; |
| 1756 | } |
| 1757 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1758 | if (prog) { |
| 1759 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 1760 | if (IS_ERR(prog)) { |
| 1761 | virtnet_set_queues(vi, curr_qp); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1762 | return PTR_ERR(prog); |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 1763 | } |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1764 | } |
| 1765 | |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 1766 | vi->xdp_queue_pairs = xdp_qp; |
| 1767 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
| 1768 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1769 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1770 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 1771 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
| 1772 | if (old_prog) |
| 1773 | bpf_prog_put(old_prog); |
| 1774 | } |
| 1775 | |
| 1776 | return 0; |
| 1777 | } |
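
virtnet_xdp_set() is the attach path, and what it accepts is constrained: no bpf_xdp_adjust_head() users, no host LRO, MTU under a page. A minimal XDP program that satisfies those checks is sketched below; the build and attach details are an assumption of a typical toolchain (e.g. clang -O2 -target bpf, then `ip link set dev eth0 xdp obj xdp_pass.o`):

/* xdp_pass.c - a minimal XDP program this driver will accept: it never
 * calls bpf_xdp_adjust_head(), so prog->xdp_adjust_head stays clear.
 * Build sketch: clang -O2 -target bpf -c xdp_pass.c -o xdp_pass.o
 */
#include <linux/bpf.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("xdp")
int xdp_pass_prog(struct xdp_md *ctx)
{
	return XDP_PASS;	/* hand every packet on to the stack */
}

char _license[] SEC("license") = "GPL";
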
| 1778 | |
| 1779 | static bool virtnet_xdp_query(struct net_device *dev) |
| 1780 | { |
| 1781 | struct virtnet_info *vi = netdev_priv(dev); |
| 1782 | int i; |
| 1783 | |
| 1784 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1785 | if (vi->rq[i].xdp_prog) |
| 1786 | return true; |
| 1787 | } |
| 1788 | return false; |
| 1789 | } |
| 1790 | |
| 1791 | static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) |
| 1792 | { |
| 1793 | switch (xdp->command) { |
| 1794 | case XDP_SETUP_PROG: |
| 1795 | return virtnet_xdp_set(dev, xdp->prog); |
| 1796 | case XDP_QUERY_PROG: |
| 1797 | xdp->prog_attached = virtnet_xdp_query(dev); |
| 1798 | return 0; |
| 1799 | default: |
| 1800 | return -EINVAL; |
| 1801 | } |
| 1802 | } |
| 1803 | |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1804 | static const struct net_device_ops virtnet_netdev = { |
| 1805 | .ndo_open = virtnet_open, |
| 1806 | .ndo_stop = virtnet_close, |
| 1807 | .ndo_start_xmit = start_xmit, |
| 1808 | .ndo_validate_addr = eth_validate_addr, |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1809 | .ndo_set_mac_address = virtnet_set_mac_address, |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1810 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1811 | .ndo_get_stats64 = virtnet_stats, |
Alex Williamson | 1824a98 | 2009-05-01 17:31:10 +0000 | [diff] [blame] | 1812 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 1813 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1814 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1815 | .ndo_poll_controller = virtnet_netpoll, |
| 1816 | #endif |
Jason Wang | 9181563 | 2014-07-23 16:33:55 +0800 | [diff] [blame] | 1817 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| 1818 | .ndo_busy_poll = virtnet_busy_poll, |
| 1819 | #endif |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1820 | .ndo_xdp = virtnet_xdp, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 1821 | }; |
| 1822 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1823 | static void virtnet_config_changed_work(struct work_struct *work) |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1824 | { |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1825 | struct virtnet_info *vi = |
| 1826 | container_of(work, struct virtnet_info, config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1827 | u16 v; |
| 1828 | |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1829 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
| 1830 | struct virtio_net_config, status, &v) < 0) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 1831 | return; |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1832 | |
| 1833 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 1834 | netdev_notify_peers(vi->dev); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1835 | virtnet_ack_link_announce(vi); |
| 1836 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1837 | |
| 1838 | /* Ignore unknown (future) status bits */ |
| 1839 | v &= VIRTIO_NET_S_LINK_UP; |
| 1840 | |
| 1841 | if (vi->status == v) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 1842 | return; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1843 | |
| 1844 | vi->status = v; |
| 1845 | |
| 1846 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
| 1847 | netif_carrier_on(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1848 | netif_tx_wake_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1849 | } else { |
| 1850 | netif_carrier_off(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1851 | netif_tx_stop_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1852 | } |
| 1853 | } |
| 1854 | |
| 1855 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 1856 | { |
| 1857 | struct virtnet_info *vi = vdev->priv; |
| 1858 | |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1859 | schedule_work(&vi->config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1860 | } |
| 1861 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1862 | static void virtnet_free_queues(struct virtnet_info *vi) |
| 1863 | { |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 1864 | int i; |
| 1865 | |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 1866 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1867 | napi_hash_del(&vi->rq[i].napi); |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 1868 | netif_napi_del(&vi->rq[i].napi); |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 1869 | } |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 1870 | |
Eric Dumazet | 963abe5 | 2016-11-15 22:24:12 -0800 | [diff] [blame] | 1871 | /* We called napi_hash_del() before netif_napi_del(), so we must
| 1872 | * respect an RCU grace period before freeing vi->rq
| 1873 | */ |
| 1874 | synchronize_net(); |
| 1875 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1876 | kfree(vi->rq); |
| 1877 | kfree(vi->sq); |
| 1878 | } |
| 1879 | |
| 1880 | static void free_receive_bufs(struct virtnet_info *vi) |
| 1881 | { |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1882 | struct bpf_prog *old_prog; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1883 | int i; |
| 1884 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1885 | rtnl_lock(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1886 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1887 | while (vi->rq[i].pages) |
| 1888 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1889 | |
| 1890 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 1891 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); |
| 1892 | if (old_prog) |
| 1893 | bpf_prog_put(old_prog); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1894 | } |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1895 | rtnl_unlock(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1896 | } |
| 1897 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1898 | static void free_receive_page_frags(struct virtnet_info *vi) |
| 1899 | { |
| 1900 | int i; |
| 1901 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 1902 | if (vi->rq[i].alloc_frag.page) |
| 1903 | put_page(vi->rq[i].alloc_frag.page); |
| 1904 | } |
| 1905 | |
John Fastabend | b68df01 | 2017-01-25 18:22:48 -0800 | [diff] [blame] | 1906 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 1907 | { |
John Fastabend | b68df01 | 2017-01-25 18:22:48 -0800 | [diff] [blame] | 1908 | /* For small receive mode always use kfree_skb variants */ |
| 1909 | if (!vi->mergeable_rx_bufs) |
| 1910 | return false; |
| 1911 | |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 1912 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) |
| 1913 | return false; |
| 1914 | else if (q < vi->curr_queue_pairs) |
| 1915 | return true; |
| 1916 | else |
| 1917 | return false; |
| 1918 | } |
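
The partition this helper encodes: XDP_TX queues are carved out of curr_queue_pairs behind the stack's queues, so a send queue whose index falls in [curr_queue_pairs - xdp_queue_pairs, curr_queue_pairs) holds raw XDP buffers rather than skbs. A standalone sketch of the same index arithmetic, with made-up counts:

/* Illustration only: the queue-index partition encoded by
 * is_xdp_raw_buffer_queue(), with example counts (4 stack queue pairs
 * plus 2 XDP_TX queue pairs, so curr_queue_pairs = 6).
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_xdp_queue(int q, int curr_queue_pairs, int xdp_queue_pairs)
{
	if (q < curr_queue_pairs - xdp_queue_pairs)
		return false;		/* regular stack queue */
	return q < curr_queue_pairs;	/* XDP_TX queue; above curr: unused */
}

int main(void)
{
	int curr = 6, xdp = 2, q;

	for (q = 0; q < 8; q++)
		printf("queue %d: %s\n", q,
		       is_xdp_queue(q, curr, xdp) ? "XDP_TX" : "stack");
	return 0;
}
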
| 1919 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1920 | static void free_unused_bufs(struct virtnet_info *vi) |
| 1921 | { |
| 1922 | void *buf; |
| 1923 | int i; |
| 1924 | |
| 1925 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1926 | struct virtqueue *vq = vi->sq[i].vq; |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 1927 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
John Fastabend | b68df01 | 2017-01-25 18:22:48 -0800 | [diff] [blame] | 1928 | if (!is_xdp_raw_buffer_queue(vi, i)) |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 1929 | dev_kfree_skb(buf); |
| 1930 | else |
| 1931 | put_page(virt_to_head_page(buf)); |
| 1932 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1933 | } |
| 1934 | |
| 1935 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1936 | struct virtqueue *vq = vi->rq[i].vq; |
| 1937 | |
| 1938 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1939 | if (vi->mergeable_rx_bufs) { |
| 1940 | unsigned long ctx = (unsigned long)buf; |
| 1941 | void *base = mergeable_ctx_to_buf_address(ctx); |
| 1942 | put_page(virt_to_head_page(base)); |
| 1943 | } else if (vi->big_packets) { |
Andrey Vagin | fa9fac1 | 2013-12-05 18:36:20 +0400 | [diff] [blame] | 1944 | give_pages(&vi->rq[i], buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1945 | } else { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1946 | dev_kfree_skb(buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1947 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1948 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1949 | } |
| 1950 | } |
| 1951 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1952 | static void virtnet_del_vqs(struct virtnet_info *vi) |
| 1953 | { |
| 1954 | struct virtio_device *vdev = vi->vdev; |
| 1955 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1956 | virtnet_clean_affinity(vi, -1); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1957 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1958 | vdev->config->del_vqs(vdev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1959 | |
| 1960 | virtnet_free_queues(vi); |
| 1961 | } |
| 1962 | |
| 1963 | static int virtnet_find_vqs(struct virtnet_info *vi) |
| 1964 | { |
| 1965 | vq_callback_t **callbacks; |
| 1966 | struct virtqueue **vqs; |
| 1967 | int ret = -ENOMEM; |
| 1968 | int i, total_vqs; |
| 1969 | const char **names; |
| 1970 | |
| 1971 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by |
| 1972 | * up to N-1 more RX/TX queue pairs used in multiqueue mode, followed
| 1973 | * by an optional control vq.
| 1974 | */ |
| 1975 | total_vqs = vi->max_queue_pairs * 2 + |
| 1976 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
| 1977 | |
| 1978 | /* Allocate space for find_vqs parameters */ |
| 1979 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); |
| 1980 | if (!vqs) |
| 1981 | goto err_vq; |
| 1982 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); |
| 1983 | if (!callbacks) |
| 1984 | goto err_callback; |
| 1985 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); |
| 1986 | if (!names) |
| 1987 | goto err_names; |
| 1988 | |
| 1989 | /* Parameters for control virtqueue, if any */ |
| 1990 | if (vi->has_cvq) { |
| 1991 | callbacks[total_vqs - 1] = NULL; |
| 1992 | names[total_vqs - 1] = "control"; |
| 1993 | } |
| 1994 | |
| 1995 | /* Allocate/initialize parameters for send/receive virtqueues */ |
| 1996 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1997 | callbacks[rxq2vq(i)] = skb_recv_done; |
| 1998 | callbacks[txq2vq(i)] = skb_xmit_done; |
| 1999 | sprintf(vi->rq[i].name, "input.%d", i); |
| 2000 | sprintf(vi->sq[i].name, "output.%d", i); |
| 2001 | names[rxq2vq(i)] = vi->rq[i].name; |
| 2002 | names[txq2vq(i)] = vi->sq[i].name; |
| 2003 | } |
| 2004 | |
| 2005 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
| 2006 | names); |
| 2007 | if (ret) |
| 2008 | goto err_find; |
| 2009 | |
| 2010 | if (vi->has_cvq) { |
| 2011 | vi->cvq = vqs[total_vqs - 1]; |
| 2012 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2013 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2014 | } |
| 2015 | |
| 2016 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2017 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
| 2018 | vi->sq[i].vq = vqs[txq2vq(i)]; |
| 2019 | } |
| 2020 | |
| 2021 | kfree(names); |
| 2022 | kfree(callbacks); |
| 2023 | kfree(vqs); |
| 2024 | |
| 2025 | return 0; |
| 2026 | |
| 2027 | err_find: |
| 2028 | kfree(names); |
| 2029 | err_names: |
| 2030 | kfree(callbacks); |
| 2031 | err_callback: |
| 2032 | kfree(vqs); |
| 2033 | err_vq: |
| 2034 | return ret; |
| 2035 | } |
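
The layout the comment above describes is interleaved: rx0, tx0, rx1, tx1, ..., with the control vq (when present) in the last slot. The rxq2vq()/txq2vq() helpers defined earlier in the file encode that mapping; the sketch below restates the arithmetic with example numbers, assuming that interleaved layout:

/* Sketch of the interleaved vq layout assumed above; the helpers mirror
 * what rxq2vq()/txq2vq() compute in this driver.
 */
#include <stdio.h>

static int rxq2vq(int rxq) { return rxq * 2; }		/* rx0->0, rx1->2 */
static int txq2vq(int txq) { return txq * 2 + 1; }	/* tx0->1, tx1->3 */

int main(void)
{
	int max_queue_pairs = 3, has_cvq = 1, i;
	int total_vqs = max_queue_pairs * 2 + has_cvq;

	for (i = 0; i < max_queue_pairs; i++)
		printf("pair %d: rx vq %d, tx vq %d\n",
		       i, rxq2vq(i), txq2vq(i));
	if (has_cvq)
		printf("control vq: %d\n", total_vqs - 1);
	return 0;
}
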
| 2036 | |
| 2037 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
| 2038 | { |
| 2039 | int i; |
| 2040 | |
| 2041 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
| 2042 | if (!vi->sq) |
| 2043 | goto err_sq; |
| 2044 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); |
Amerigo Wang | 008d427 | 2012-12-10 02:24:08 +0000 | [diff] [blame] | 2045 | if (!vi->rq) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2046 | goto err_rq; |
| 2047 | |
| 2048 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
| 2049 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2050 | vi->rq[i].pages = NULL; |
| 2051 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
| 2052 | napi_weight); |
| 2053 | |
| 2054 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 2055 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2056 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
| 2057 | } |
| 2058 | |
| 2059 | return 0; |
| 2060 | |
| 2061 | err_rq: |
| 2062 | kfree(vi->sq); |
| 2063 | err_sq: |
| 2064 | return -ENOMEM; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2065 | } |
| 2066 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2067 | static int init_vqs(struct virtnet_info *vi) |
| 2068 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2069 | int ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2070 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2071 | /* Allocate send & receive queues */ |
| 2072 | ret = virtnet_alloc_queues(vi); |
| 2073 | if (ret) |
| 2074 | goto err; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2075 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2076 | ret = virtnet_find_vqs(vi); |
| 2077 | if (ret) |
| 2078 | goto err_free; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2079 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2080 | get_online_cpus(); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 2081 | virtnet_set_affinity(vi); |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2082 | put_online_cpus(); |
| 2083 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2084 | return 0; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2085 | |
| 2086 | err_free: |
| 2087 | virtnet_free_queues(vi); |
| 2088 | err: |
| 2089 | return ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2090 | } |
| 2091 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2092 | #ifdef CONFIG_SYSFS |
| 2093 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, |
| 2094 | struct rx_queue_attribute *attribute, char *buf) |
| 2095 | { |
| 2096 | struct virtnet_info *vi = netdev_priv(queue->dev); |
| 2097 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 2098 | struct ewma_pkt_len *avg; |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2099 | |
| 2100 | BUG_ON(queue_index >= vi->max_queue_pairs); |
| 2101 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
| 2102 | return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); |
| 2103 | } |
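
This attribute surfaces the per-queue EWMA-derived buffer size through the "virtio_net" sysfs group registered just below, i.e. at /sys/class/net/<dev>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size. A small reader sketch, assuming a device named "eth0" and queue 0:

/* Sketch: read the per-queue mergeable buffer size exported above.
 * Device "eth0" and queue 0 are example names.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/net/eth0/queues/rx-0/"
			   "virtio_net/mergeable_rx_buffer_size";
	unsigned int len;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &len) == 1)
		printf("rx-0 refill buffer size: %u bytes\n", len);
	fclose(f);
	return 0;
}
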
| 2104 | |
| 2105 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
| 2106 | __ATTR_RO(mergeable_rx_buffer_size); |
| 2107 | |
| 2108 | static struct attribute *virtio_net_mrg_rx_attrs[] = { |
| 2109 | &mergeable_rx_buffer_size_attribute.attr, |
| 2110 | NULL |
| 2111 | }; |
| 2112 | |
| 2113 | static const struct attribute_group virtio_net_mrg_rx_group = { |
| 2114 | .name = "virtio_net", |
| 2115 | .attrs = virtio_net_mrg_rx_attrs |
| 2116 | }; |
| 2117 | #endif |
| 2118 | |
Jason Wang | 892d6eb | 2014-11-20 17:03:05 +0800 | [diff] [blame] | 2119 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
| 2120 | unsigned int fbit, |
| 2121 | const char *fname, const char *dname) |
| 2122 | { |
| 2123 | if (!virtio_has_feature(vdev, fbit)) |
| 2124 | return false; |
| 2125 | |
| 2126 | dev_err(&vdev->dev, "device advertises feature %s but not %s", |
| 2127 | fname, dname); |
| 2128 | |
| 2129 | return true; |
| 2130 | } |
| 2131 | |
| 2132 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ |
| 2133 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) |
| 2134 | |
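VIRTNET_FAIL_ON relies on preprocessor stringification: passing the feature bit once yields both the value to test and, via #fbit, the literal name printed in the diagnostic. A trivial standalone illustration of the same pattern (the bit number below is an example only):

/* Same stringification pattern as VIRTNET_FAIL_ON: one macro argument
 * yields both the bit to test and the name printed in the diagnostic.
 */
#include <stdio.h>

#define EXAMPLE_F_CTRL_RX 18	/* example bit value only */

#define FAIL_ON(features, fbit, dname)					\
	(((features) & (1u << (fbit))) &&				\
	 printf("device advertises feature %s but not %s\n", #fbit, dname))

int main(void)
{
	unsigned int features = 1u << EXAMPLE_F_CTRL_RX;

	FAIL_ON(features, EXAMPLE_F_CTRL_RX, "EXAMPLE_F_CTRL_VQ");
	return 0;
}
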
| 2135 | static bool virtnet_validate_features(struct virtio_device *vdev) |
| 2136 | { |
| 2137 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && |
| 2138 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, |
| 2139 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2140 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, |
| 2141 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2142 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, |
| 2143 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2144 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || |
| 2145 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, |
| 2146 | "VIRTIO_NET_F_CTRL_VQ"))) { |
| 2147 | return false; |
| 2148 | } |
| 2149 | |
| 2150 | return true; |
| 2151 | } |
| 2152 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 2153 | #define MIN_MTU ETH_MIN_MTU |
| 2154 | #define MAX_MTU ETH_MAX_MTU |
| 2155 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2156 | static int virtnet_probe(struct virtio_device *vdev) |
| 2157 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2158 | int i, err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2159 | struct net_device *dev; |
| 2160 | struct virtnet_info *vi; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2161 | u16 max_queue_pairs; |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 2162 | int mtu; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2163 | |
Michael S. Tsirkin | 6ba4224 | 2015-01-12 16:23:37 +0200 | [diff] [blame] | 2164 | if (!vdev->config->get) { |
| 2165 | dev_err(&vdev->dev, "%s failure: config access disabled\n", |
| 2166 | __func__); |
| 2167 | return -EINVAL; |
| 2168 | } |
| 2169 | |
Jason Wang | 892d6eb | 2014-11-20 17:03:05 +0800 | [diff] [blame] | 2170 | if (!virtnet_validate_features(vdev)) |
| 2171 | return -EINVAL; |
| 2172 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2173 | /* Find if host supports multiqueue virtio_net device */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2174 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
| 2175 | struct virtio_net_config, |
| 2176 | max_virtqueue_pairs, &max_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2177 | |
| 2178 | /* We need at least 2 queues */
| 2179 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
| 2180 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
| 2181 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 2182 | max_queue_pairs = 1; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2183 | |
| 2184 | /* Allocate ourselves a network device with room for our info */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2185 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2186 | if (!dev) |
| 2187 | return -ENOMEM; |
| 2188 | |
| 2189 | /* Set up network device as normal. */ |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 2190 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2191 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2192 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2193 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 2194 | dev->ethtool_ops = &virtnet_ethtool_ops; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2195 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 2196 | |
| 2197 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2198 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2199 | /* This opens up the world of extra features. */ |
Jason Wang | 48900cb | 2015-08-05 10:34:04 +0800 | [diff] [blame] | 2200 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2201 | if (csum) |
Jason Wang | 48900cb | 2015-08-05 10:34:04 +0800 | [diff] [blame] | 2202 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2203 | |
| 2204 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 2205 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2206 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 2207 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 2208 | /* Individual feature bits: what can host handle? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2209 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 2210 | dev->hw_features |= NETIF_F_TSO; |
| 2211 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 2212 | dev->hw_features |= NETIF_F_TSO6; |
| 2213 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 2214 | dev->hw_features |= NETIF_F_TSO_ECN; |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 2215 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) |
| 2216 | dev->hw_features |= NETIF_F_UFO; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2217 | |
Jason Wang | 41f2f12 | 2014-12-24 11:03:52 +0800 | [diff] [blame] | 2218 | dev->features |= NETIF_F_GSO_ROBUST; |
| 2219 | |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2220 | if (gso) |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 2221 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2222 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2223 | } |
Thomas Huth | 4f49129 | 2013-08-27 17:09:02 +0200 | [diff] [blame] | 2224 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 2225 | dev->features |= NETIF_F_RXCSUM; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2226 | |
Jason Wang | 4fda830 | 2013-04-10 23:32:21 +0000 | [diff] [blame] | 2227 | dev->vlan_features = dev->features; |
| 2228 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 2229 | /* MTU range: 68 - 65535 */ |
| 2230 | dev->min_mtu = MIN_MTU; |
| 2231 | dev->max_mtu = MAX_MTU; |
| 2232 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2233 | /* Configuration may specify what MAC to use. Otherwise random. */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2234 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
| 2235 | virtio_cread_bytes(vdev, |
| 2236 | offsetof(struct virtio_net_config, mac), |
| 2237 | dev->dev_addr, dev->addr_len); |
| 2238 | else |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 2239 | eth_hw_addr_random(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2240 | |
| 2241 | /* Set up our device-specific information */ |
| 2242 | vi = netdev_priv(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2243 | vi->dev = dev; |
| 2244 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 2245 | vdev->priv = vi; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2246 | vi->stats = alloc_percpu(struct virtnet_stats); |
| 2247 | err = -ENOMEM; |
| 2248 | if (vi->stats == NULL) |
| 2249 | goto free; |
| 2250 | |
John Stultz | 827da44 | 2013-10-07 15:51:58 -0700 | [diff] [blame] | 2251 | for_each_possible_cpu(i) { |
| 2252 | struct virtnet_stats *virtnet_stats; |
| 2253 | virtnet_stats = per_cpu_ptr(vi->stats, i); |
| 2254 | u64_stats_init(&virtnet_stats->tx_syncp); |
| 2255 | u64_stats_init(&virtnet_stats->rx_syncp); |
| 2256 | } |
| 2257 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2258 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2259 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 2260 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 2261 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 2262 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 2263 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 2264 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 2265 | vi->big_packets = true; |
| 2266 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 2267 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 2268 | vi->mergeable_rx_bufs = true; |
| 2269 | |
Michael S. Tsirkin | d04302b | 2014-10-24 00:24:03 +0300 | [diff] [blame] | 2270 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || |
| 2271 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 2272 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 2273 | else |
| 2274 | vi->hdr_len = sizeof(struct virtio_net_hdr); |
| 2275 | |
Michael S. Tsirkin | 7599330 | 2015-07-15 15:26:19 +0300 | [diff] [blame] | 2276 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
| 2277 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 2278 | vi->any_header_sg = true; |
| 2279 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2280 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 2281 | vi->has_cvq = true; |
| 2282 | |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 2283 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 2284 | mtu = virtio_cread16(vdev, |
| 2285 | offsetof(struct virtio_net_config, |
| 2286 | mtu)); |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 2287 | if (mtu < dev->min_mtu) { |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 2288 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 2289 | } else { |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 2290 | dev->mtu = mtu; |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 2291 | dev->max_mtu = mtu; |
| 2292 | } |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 2293 | } |
| 2294 | |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 2295 | if (vi->any_header_sg) |
| 2296 | dev->needed_headroom = vi->hdr_len; |
Zhangjie \(HZ\) | 6ebbc1a | 2014-04-29 18:43:22 +0800 | [diff] [blame] | 2297 | |
Jason Wang | 4490001 | 2016-11-25 12:37:26 +0800 | [diff] [blame] | 2298 | /* Enable multiqueue by default */ |
| 2299 | if (num_online_cpus() >= max_queue_pairs) |
| 2300 | vi->curr_queue_pairs = max_queue_pairs; |
| 2301 | else |
| 2302 | vi->curr_queue_pairs = num_online_cpus(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2303 | vi->max_queue_pairs = max_queue_pairs; |
| 2304 | |
| 2305 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2306 | err = init_vqs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2307 | if (err) |
Jason Wang | 9bb8ca8 | 2013-11-05 18:19:45 +0800 | [diff] [blame] | 2308 | goto free_stats; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2309 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2310 | #ifdef CONFIG_SYSFS |
| 2311 | if (vi->mergeable_rx_bufs) |
| 2312 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; |
| 2313 | #endif |
Zhi Yong Wu | 0f13b66 | 2013-11-18 21:19:27 +0800 | [diff] [blame] | 2314 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
| 2315 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2316 | |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2317 | virtnet_init_settings(dev); |
| 2318 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2319 | err = register_netdev(dev); |
| 2320 | if (err) { |
| 2321 | pr_debug("virtio_net: registering device failed\n"); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2322 | goto free_vqs; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2323 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2324 | |
Michael S. Tsirkin | 4baf1e3 | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 2325 | virtio_device_ready(vdev); |
| 2326 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2327 | err = virtnet_cpu_notif_add(vi); |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 2328 | if (err) { |
| 2329 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
wangyunjian | f00e35e | 2016-05-31 11:52:43 +0800 | [diff] [blame] | 2330 | goto free_unregister_netdev; |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 2331 | } |
| 2332 | |
Jason Wang | a220871 | 2016-12-13 14:23:05 +0800 | [diff] [blame] | 2333 | rtnl_lock(); |
| 2334 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
| 2335 | rtnl_unlock(); |
Jason Wang | 4490001 | 2016-11-25 12:37:26 +0800 | [diff] [blame] | 2336 | |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 2337 | /* Assume link up if device can't report link status, |
| 2338 |  * otherwise get link status from config. */
| 2339 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
| 2340 | netif_carrier_off(dev); |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 2341 | schedule_work(&vi->config_work); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 2342 | } else { |
| 2343 | vi->status = VIRTIO_NET_S_LINK_UP; |
| 2344 | netif_carrier_on(dev); |
| 2345 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2346 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2347 | pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
| 2348 | dev->name, max_queue_pairs); |
| 2349 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2350 | return 0; |
| 2351 | |
wangyunjian | f00e35e | 2016-05-31 11:52:43 +0800 | [diff] [blame] | 2352 | free_unregister_netdev: |
Michael S. Tsirkin | 0246555 | 2014-10-15 10:22:31 +1030 | [diff] [blame] | 2353 | vi->vdev->config->reset(vdev); |
| 2354 | |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2355 | unregister_netdev(dev); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2356 | free_vqs: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2357 | cancel_delayed_work_sync(&vi->refill); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 2358 | free_receive_page_frags(vi); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2359 | virtnet_del_vqs(vi); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2360 | free_stats: |
| 2361 | free_percpu(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2362 | free: |
| 2363 | free_netdev(dev); |
| 2364 | return err; |
| 2365 | } |
| 2366 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2367 | static void remove_vq_common(struct virtnet_info *vi) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2368 | { |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2369 | vi->vdev->config->reset(vi->vdev); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 2370 | |
| 2371 | /* Free unused buffers in both send and recv, if any. */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 2372 | free_unused_bufs(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 2373 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2374 | free_receive_bufs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2375 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 2376 | free_receive_page_frags(vi); |
| 2377 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2378 | virtnet_del_vqs(vi); |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2379 | } |
| 2380 | |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 2381 | static void virtnet_remove(struct virtio_device *vdev) |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2382 | { |
| 2383 | struct virtnet_info *vi = vdev->priv; |
| 2384 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2385 | virtnet_cpu_notif_remove(vi); |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 2386 | |
Michael S. Tsirkin | 102a278 | 2014-10-15 10:22:29 +1030 | [diff] [blame] | 2387 | /* Make sure no work handler is accessing the device. */ |
| 2388 | flush_work(&vi->config_work); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2389 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2390 | unregister_netdev(vi->dev); |
| 2391 | |
| 2392 | remove_vq_common(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 2393 | |
Krishna Kumar | 2e66f55 | 2011-07-20 03:56:02 +0000 | [diff] [blame] | 2394 | free_percpu(vi->stats); |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 2395 | free_netdev(vi->dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2396 | } |
| 2397 | |
Aaron Lu | 8910700 | 2013-09-17 09:25:23 +0930 | [diff] [blame] | 2398 | #ifdef CONFIG_PM_SLEEP |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2399 | static int virtnet_freeze(struct virtio_device *vdev) |
| 2400 | { |
| 2401 | struct virtnet_info *vi = vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2402 | int i; |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2403 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2404 | virtnet_cpu_notif_remove(vi); |
Jason Wang | ec9debb | 2013-10-29 15:11:07 +0800 | [diff] [blame] | 2405 | |
Michael S. Tsirkin | 102a278 | 2014-10-15 10:22:29 +1030 | [diff] [blame] | 2406 | /* Make sure no work handler is accessing the device */ |
| 2407 | flush_work(&vi->config_work); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2408 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2409 | netif_device_detach(vi->dev); |
| 2410 | cancel_delayed_work_sync(&vi->refill); |
| 2411 | |
Jason Wang | 9181563 | 2014-07-23 16:33:55 +0800 | [diff] [blame] | 2412 | if (netif_running(vi->dev)) { |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 2413 | for (i = 0; i < vi->max_queue_pairs; i++) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2414 | napi_disable(&vi->rq[i].napi); |
Jason Wang | 9181563 | 2014-07-23 16:33:55 +0800 | [diff] [blame] | 2415 | } |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2416 | |
| 2417 | remove_vq_common(vi); |
| 2418 | |
| 2419 | return 0; |
| 2420 | } |
| 2421 | |
| 2422 | static int virtnet_restore(struct virtio_device *vdev) |
| 2423 | { |
| 2424 | struct virtnet_info *vi = vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2425 | int err, i; |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2426 | |
| 2427 | err = init_vqs(vi); |
| 2428 | if (err) |
| 2429 | return err; |
| 2430 | |
Michael S. Tsirkin | e53fbd1 | 2014-10-15 10:22:32 +1030 | [diff] [blame] | 2431 | virtio_device_ready(vdev); |
| 2432 | |
Jason Wang | 6cd4ce0 | 2013-12-30 11:34:40 +0800 | [diff] [blame] | 2433 | if (netif_running(vi->dev)) { |
| 2434 | for (i = 0; i < vi->curr_queue_pairs; i++) |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 2435 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
Jason Wang | 6cd4ce0 | 2013-12-30 11:34:40 +0800 | [diff] [blame] | 2436 | schedule_delayed_work(&vi->refill, 0); |
| 2437 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2438 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2439 | virtnet_napi_enable(&vi->rq[i]); |
Jason Wang | 6cd4ce0 | 2013-12-30 11:34:40 +0800 | [diff] [blame] | 2440 | } |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2441 | |
| 2442 | netif_device_attach(vi->dev); |
| 2443 | |
Jason Wang | 35ed159 | 2013-10-15 11:18:59 +0800 | [diff] [blame] | 2444 | rtnl_lock(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2445 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
Jason Wang | 35ed159 | 2013-10-15 11:18:59 +0800 | [diff] [blame] | 2446 | rtnl_unlock(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2447 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2448 | err = virtnet_cpu_notif_add(vi); |
Jason Wang | ec9debb | 2013-10-29 15:11:07 +0800 | [diff] [blame] | 2449 | if (err) |
| 2450 | return err; |
| 2451 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2452 | return 0; |
| 2453 | } |
| 2454 | #endif |
| 2455 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2456 | static struct virtio_device_id id_table[] = { |
| 2457 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 2458 | { 0 }, |
| 2459 | }; |
| 2460 | |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2461 | #define VIRTNET_FEATURES \ |
| 2462 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ |
| 2463 | VIRTIO_NET_F_MAC, \ |
| 2464 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ |
| 2465 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ |
| 2466 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ |
| 2467 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ |
| 2468 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ |
| 2469 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ |
| 2470 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ |
| 2471 | VIRTIO_NET_F_MTU |
| 2472 | |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 2473 | static unsigned int features[] = { |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2474 | VIRTNET_FEATURES, |
| 2475 | }; |
| 2476 | |
| 2477 | static unsigned int features_legacy[] = { |
| 2478 | VIRTNET_FEATURES, |
| 2479 | VIRTIO_NET_F_GSO, |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 2480 | VIRTIO_F_ANY_LAYOUT, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 2481 | }; |
| 2482 | |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 2483 | static struct virtio_driver virtio_net_driver = { |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 2484 | .feature_table = features, |
| 2485 | .feature_table_size = ARRAY_SIZE(features), |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2486 | .feature_table_legacy = features_legacy, |
| 2487 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2488 | .driver.name = KBUILD_MODNAME, |
| 2489 | .driver.owner = THIS_MODULE, |
| 2490 | .id_table = id_table, |
| 2491 | .probe = virtnet_probe, |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 2492 | .remove = virtnet_remove, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2493 | .config_changed = virtnet_config_changed, |
Aaron Lu | 8910700 | 2013-09-17 09:25:23 +0930 | [diff] [blame] | 2494 | #ifdef CONFIG_PM_SLEEP |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2495 | .freeze = virtnet_freeze, |
| 2496 | .restore = virtnet_restore, |
| 2497 | #endif |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2498 | }; |
| 2499 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2500 | static __init int virtio_net_driver_init(void) |
| 2501 | { |
| 2502 | int ret; |
| 2503 | |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 2504 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2505 | virtnet_cpu_online, |
| 2506 | virtnet_cpu_down_prep); |
| 2507 | if (ret < 0) |
| 2508 | goto out; |
| 2509 | virtionet_online = ret; |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 2510 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2511 | NULL, virtnet_cpu_dead); |
| 2512 | if (ret) |
| 2513 | goto err_dead; |
| 2514 | |
| 2515 | ret = register_virtio_driver(&virtio_net_driver); |
| 2516 | if (ret) |
| 2517 | goto err_virtio; |
| 2518 | return 0; |
| 2519 | err_virtio: |
| 2520 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 2521 | err_dead: |
| 2522 | cpuhp_remove_multi_state(virtionet_online); |
| 2523 | out: |
| 2524 | return ret; |
| 2525 | } |
| 2526 | module_init(virtio_net_driver_init); |
| 2527 | |
| 2528 | static __exit void virtio_net_driver_exit(void) |
| 2529 | { |
| 2530 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 2531 | cpuhp_remove_multi_state(virtionet_online); |
| 2532 | unregister_virtio_driver(&virtio_net_driver); |
| 2533 | } |
| 2534 | module_exit(virtio_net_driver_exit); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2535 | |
| 2536 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 2537 | MODULE_DESCRIPTION("Virtio network driver"); |
| 2538 | MODULE_LICENSE("GPL"); |