/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>
#include <net/xdp.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
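/* With a precision of 0 fractional bits and a reciprocal weight of 64,
 * each sample in effect updates the average as
 *   avg = (avg * 63 + pkt_len) / 64,
 * so one outlier moves the estimate by only ~1/64th of its deviation.
 */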

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
};

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* Control VQ buffers: protected by the rtnl lock */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
	struct virtio_net_ctrl_mq ctrl_mq;
	u8 ctrl_promisc;
	u8 ctrl_allmulti;
	u16 ctrl_vid;
	u64 ctrl_offloads;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and the data sg buffer shares a
	 * page with this header sg. This padding makes the next sg 16-byte
	 * aligned after the header.
	 */
	char padding[4];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
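/* For example, with two queue pairs (N = 1) the layout is
 * 0:rx0 1:tx0 2:rx1 3:tx1 4:cvq, so txq2vq(1) == 3 and the
 * virtqueue with index 3 maps back via (3 - 1) / 2 to tx queue 1.
 */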
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

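/* Re-enable callbacks before completing NAPI, then poll the ring once
 * more to catch buffers that arrived between the last poll and the
 * re-enable; rescheduling in that case avoids a missed-interrupt race.
 */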
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

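/* The per-buffer context for mergeable buffers packs two values into
 * one pointer-sized word: the headroom in the bits at and above
 * MRG_CTX_HEADER_SHIFT, and the buffer truesize in the low 22 bits.
 */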
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

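/* Builds an skb around a received page: the virtio header plus up to
 * skb_tailroom() bytes (the skb is sized for GOOD_COPY_LEN) are copied
 * into the linear area, and any remainder is attached as page frags.
 */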
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static void virtnet_xdp_flush(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct send_queue *sq;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	virtqueue_kick(sq->vq);
}

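/* XDP transmit uses the last xdp_queue_pairs send queues, indexed by
 * the current CPU, so each CPU owns its own XDP TX ring and no tx lock
 * is needed here.
 */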
static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
			       struct xdp_buff *xdp)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int len;
	struct send_queue *sq;
	unsigned int qp;
	void *xdp_sent;
	int err;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		struct page *sent_page = virt_to_head_page(xdp_sent);

		put_page(sent_page);
	}

	xdp->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdp->data;
	memset(hdr, 0, vi->hdr_len);

	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
	if (unlikely(err)) {
		struct page *page = virt_to_head_page(xdp->data);

		put_page(page);
		return false;
	}

	return true;
}

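/* Serves as the ndo_xdp_xmit hook, i.e. the entry point for XDP_REDIRECT
 * traffic directed at this device; -ENOSPC signals that the descriptor
 * could not be queued. The ring is only kicked from virtnet_xdp_flush().
 */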
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
	struct virtnet_info *vi = netdev_priv(dev);
	bool sent = __virtnet_xdp_xmit(vi, xdp);

	if (!sent)
		return -ENOSPC;
	return 0;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large
 * buffers with sufficient headroom - so it should affect at most
 * queue-size packets. Afterwards, the conditions to enable XDP should
 * preclude the underlying device from sending packets across multiple
 * buffers (num_buf > 1), and we make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

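/* Layout of a small receive buffer, as filled in by add_recvbuf_small():
 * [ VIRTNET_RX_PAD | XDP headroom | vnet hdr | packet | skb_shared_info ]
 * which lets build_skb() below turn the buffer into an skb without
 * copying.
 */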
static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     bool *xdp_xmit)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0, err;
	struct page *xdp_page;
	len -= vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			break;
		case XDP_TX:
			if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
				trace_xdp_exception(vi->dev, xdp_prog, act);
			else
				*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (!err)
				*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len + delta);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

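/* With mergeable buffers the device may split one packet across several
 * descriptors; hdr->num_buffers says how many. The first buffer carries
 * the header, and the remaining num_buffers - 1 buffers are pulled off
 * the virtqueue and attached to the head skb as fragments.
 */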
static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 bool *xdp_xmit)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;

	head_skb = NULL;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* This happens when rx buffer size is underestimated */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded. In practice I
		 * was not able to create this condition.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		if (act != XDP_PASS)
			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);

		switch (act) {
		case XDP_PASS:
			/* Recalculate offset to account for any header
			 * adjustments. Note: the other cases do not build
			 * an skb and do not use offset.
			 */
			offset = xdp.data -
					page_address(xdp_page) - vi->hdr_len;

			/* We can only build an skb from xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       offset, len, PAGE_SIZE);
				return head_skb;
			}
			break;
		case XDP_TX:
			if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
				trace_xdp_exception(vi->dev, xdp_prog, act);
			else
				*xdp_xmit = true;
			if (unlikely(xdp_page != page))
				goto err_xdp;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (!err)
				*xdp_xmit = true;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (--num_buf) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int ret;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return 0;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);

	if (unlikely(!skb))
		return 0;

	hdr = skb_vnet_hdr(skb);

	ret = skb->len;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return ret;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
	return 0;
}

Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 901 | /* Unlike mergeable buffers, all buffers are allocated to the |
| 902 | * same size, except for the headroom. For this reason we do |
| 903 | * not need to use mergeable_len_to_ctx here - it is enough |
| 904 | * to store the headroom as the context ignoring the truesize. |
| 905 | */ |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 906 | static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, |
| 907 | gfp_t gfp) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 908 | { |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 909 | struct page_frag *alloc_frag = &rq->alloc_frag; |
| 910 | char *buf; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 911 | unsigned int xdp_headroom = virtnet_get_headroom(vi); |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 912 | void *ctx = (void *)(unsigned long)xdp_headroom; |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 913 | int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 914 | int err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 915 | |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 916 | len = SKB_DATA_ALIGN(len) + |
| 917 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 918 | if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 919 | return -ENOMEM; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 920 | |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 921 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
| 922 | get_page(alloc_frag->page); |
| 923 | alloc_frag->offset += len; |
| 924 | sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, |
| 925 | vi->hdr_len + GOOD_PACKET_LEN); |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 926 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 927 | if (err < 0) |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 928 | put_page(virt_to_head_page(buf)); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 929 | return err; |
| 930 | } |
| 931 | |
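| | /* Post one receive buffer for big packets: a chain of MAX_SKB_FRAGS + 2 |
| | * pages linked through page->private. sg[0] is reserved for the |
| | * virtio-net header and sg[1] for the start of the packet data. |
| | */ |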
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 932 | static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, |
| 933 | gfp_t gfp) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 934 | { |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 935 | struct page *first, *list = NULL; |
| 936 | char *p; |
| 937 | int i, err, offset; |
| 938 | |
Rusty Russell | a583544 | 2014-09-11 10:17:36 +0930 | [diff] [blame] | 939 | sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); |
| 940 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 941 | /* the page in rq->sg[MAX_SKB_FRAGS + 1] is the list tail */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 942 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 943 | first = get_a_page(rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 944 | if (!first) { |
| 945 | if (list) |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 946 | give_pages(rq, list); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 947 | return -ENOMEM; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 948 | } |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 949 | sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 950 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 951 | /* chain new page in list head to match sg */ |
| 952 | first->private = (unsigned long)list; |
| 953 | list = first; |
| 954 | } |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 955 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 956 | first = get_a_page(rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 957 | if (!first) { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 958 | give_pages(rq, list); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 959 | return -ENOMEM; |
| 960 | } |
| 961 | p = page_address(first); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 962 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 963 | /* rq->sg[0], rq->sg[1] share the same page */ |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 964 | /* a separate rq->sg[0] for the header - required in case !any_header_sg */ |
| 965 | sg_set_buf(&rq->sg[0], p, vi->hdr_len); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 966 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 967 | /* rq->sg[1] for data packet, from offset */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 968 | offset = sizeof(struct padded_vnet_hdr); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 969 | sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 970 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 971 | /* chain first in list head */ |
| 972 | first->private = (unsigned long)list; |
Rusty Russell | 9dc7b9e | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 973 | err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, |
| 974 | first, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 975 | if (err < 0) |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 976 | give_pages(rq, first); |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 977 | |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 978 | return err; |
| 979 | } |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 980 | |
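| | /* Size the next mergeable receive buffer from the EWMA of recent packet |
| | * lengths: clamp the average to [rq->min_buf_len, PAGE_SIZE - hdr_len], |
| | * add the header length and round up to an L1 cache line. |
| | */ |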
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 981 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, |
| 982 | struct ewma_pkt_len *avg_pkt_len) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 983 | { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 984 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 985 | unsigned int len; |
| 986 | |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 987 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), |
Michael S. Tsirkin | f0c3192 | 2017-06-02 17:54:33 +0300 | [diff] [blame] | 988 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
Michael S. Tsirkin | e377fcc | 2017-03-06 22:21:35 +0200 | [diff] [blame] | 989 | return ALIGN(len, L1_CACHE_BYTES); |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 990 | } |
| 991 | |
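| | /* Post a single mergeable receive buffer carved from a page fragment. |
| | * The buffer length and the (XDP) headroom are packed into the opaque |
| | * context so the receive path can recover both later. |
| | */ |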
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 992 | static int add_recvbuf_mergeable(struct virtnet_info *vi, |
| 993 | struct receive_queue *rq, gfp_t gfp) |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 994 | { |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 995 | struct page_frag *alloc_frag = &rq->alloc_frag; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 996 | unsigned int headroom = virtnet_get_headroom(vi); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 997 | char *buf; |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 998 | void *ctx; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 999 | int err; |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1000 | unsigned int len, hole; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1001 | |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 1002 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1003 | if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1004 | return -ENOMEM; |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1005 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1006 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1007 | buf += headroom; /* advance address leaving hole at front of pkt */ |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1008 | get_page(alloc_frag->page); |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1009 | alloc_frag->offset += len + headroom; |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1010 | hole = alloc_frag->size - alloc_frag->offset; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1011 | if (hole < len + headroom) { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1012 | /* To avoid internal fragmentation: if it is very likely that there |
| 1013 | * is not enough space left for another buffer, add the remaining |
Michael S. Tsirkin | 1daa879 | 2017-07-31 21:49:49 +0300 | [diff] [blame] | 1014 | * space to the current buffer. |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 1015 | */ |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1016 | len += hole; |
| 1017 | alloc_frag->offset += hole; |
| 1018 | } |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1019 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 1020 | sg_init_one(rq->sg, buf, len); |
David S. Miller | 29fda25 | 2017-08-01 10:07:50 -0700 | [diff] [blame] | 1021 | ctx = mergeable_len_to_ctx(len, headroom); |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1022 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1023 | if (err < 0) |
Michael Dalton | 2613af0 | 2013-10-28 15:44:18 -0700 | [diff] [blame] | 1024 | put_page(virt_to_head_page(buf)); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1025 | |
| 1026 | return err; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1027 | } |
| 1028 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 1029 | /* |
| 1030 | * Returns false if we couldn't fill entirely (OOM). |
| 1031 | * |
| 1032 | * Normally run in the receive path, but can also be run from ndo_open |
| 1033 | * before we're receiving packets, or from refill_work which is |
| 1034 | * careful to disable receiving (using napi_disable). |
| 1035 | */ |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1036 | static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, |
| 1037 | gfp_t gfp) |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1038 | { |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1039 | int err; |
Michael S. Tsirkin | 1788f495 | 2010-07-02 16:32:55 +0000 | [diff] [blame] | 1040 | bool oom; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1041 | |
Amit Shah | 0aea51c | 2009-08-26 14:58:28 +0530 | [diff] [blame] | 1042 | do { |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1043 | if (vi->mergeable_rx_bufs) |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 1044 | err = add_recvbuf_mergeable(vi, rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1045 | else if (vi->big_packets) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1046 | err = add_recvbuf_big(vi, rq, gfp); |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1047 | else |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1048 | err = add_recvbuf_small(vi, rq, gfp); |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1049 | |
Michael S. Tsirkin | 1788f495 | 2010-07-02 16:32:55 +0000 | [diff] [blame] | 1050 | oom = err == -ENOMEM; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1051 | if (err) |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1052 | break; |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1053 | } while (rq->vq->num_free); |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 1054 | virtqueue_kick(rq->vq); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1055 | return !oom; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1056 | } |
| 1057 | |
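| | /* Receive virtqueue callback: schedule NAPI on the matching rx queue. */ |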
Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 1058 | static void skb_recv_done(struct virtqueue *rvq) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1059 | { |
| 1060 | struct virtnet_info *vi = rvq->vdev->priv; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1061 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1062 | |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1063 | virtqueue_napi_schedule(&rq->napi, rvq); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1064 | } |
| 1065 | |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1066 | static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 1067 | { |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1068 | napi_enable(napi); |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 1069 | |
| 1070 | /* If all buffers were filled by the other side before we enabled napi, |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1071 | * we won't get another interrupt, so process any outstanding packets now. |
| 1072 | * Call local_bh_enable afterwards to trigger softIRQ processing. |
| 1073 | */ |
| 1074 | local_bh_disable(); |
| 1075 | virtqueue_napi_schedule(napi, vq); |
| 1076 | local_bh_enable(); |
Bruce Rogers | 3e9d08e | 2011-02-10 11:03:31 -0800 | [diff] [blame] | 1077 | } |
| 1078 | |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1079 | static void virtnet_napi_tx_enable(struct virtnet_info *vi, |
| 1080 | struct virtqueue *vq, |
| 1081 | struct napi_struct *napi) |
| 1082 | { |
| 1083 | if (!napi->weight) |
| 1084 | return; |
| 1085 | |
| 1086 | /* Tx napi touches cachelines on the cpu handling tx interrupts. Only |
| 1087 | * enable the feature if this is likely affine with the transmit path. |
| 1088 | */ |
| 1089 | if (!vi->affinity_hint_set) { |
| 1090 | napi->weight = 0; |
| 1091 | return; |
| 1092 | } |
| 1093 | |
| 1094 | return virtnet_napi_enable(vq, napi); |
| 1095 | } |
| 1096 | |
Willem de Bruijn | 78a57b4 | 2017-04-25 15:59:17 -0400 | [diff] [blame] | 1097 | static void virtnet_napi_tx_disable(struct napi_struct *napi) |
| 1098 | { |
| 1099 | if (napi->weight) |
| 1100 | napi_disable(napi); |
| 1101 | } |
| 1102 | |
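| | /* Fallback refill, run from a workqueue when the receive path could not |
| | * refill the rings (OOM). NAPI is disabled around each refill so it |
| | * cannot race with the receive path. |
| | */ |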
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1103 | static void refill_work(struct work_struct *work) |
| 1104 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1105 | struct virtnet_info *vi = |
| 1106 | container_of(work, struct virtnet_info, refill.work); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1107 | bool still_empty; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1108 | int i; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1109 | |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1110 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1111 | struct receive_queue *rq = &vi->rq[i]; |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1112 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1113 | napi_disable(&rq->napi); |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1114 | still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1115 | virtnet_napi_enable(rq->vq, &rq->napi); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1116 | |
| 1117 | /* In theory, this can happen: if we don't get any buffers in, |
| 1118 | * we will *never* try to fill again. |
| 1119 | */ |
| 1120 | if (still_empty) |
| 1121 | schedule_delayed_work(&vi->refill, HZ/2); |
| 1122 | } |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1123 | } |
| 1124 | |
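| | /* Harvest up to @budget completed receive buffers, hand the resulting |
| | * skbs to the stack, refill the ring when it runs low (deferring to |
| | * refill_work on OOM) and update the per-cpu rx statistics. |
| | */ |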
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1125 | static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1126 | { |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1127 | struct virtnet_info *vi = rq->vq->vdev->priv; |
Jason Wang | 61845d2 | 2017-02-17 11:33:09 +0800 | [diff] [blame] | 1128 | unsigned int len, received = 0, bytes = 0; |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 1129 | void *buf; |
Jason Wang | 61845d2 | 2017-02-17 11:33:09 +0800 | [diff] [blame] | 1130 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1131 | |
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 1132 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1133 | void *ctx; |
| 1134 | |
| 1135 | while (received < budget && |
| 1136 | (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1137 | bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1138 | received++; |
| 1139 | } |
| 1140 | } else { |
| 1141 | while (received < budget && |
| 1142 | (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1143 | bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 1144 | received++; |
| 1145 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1146 | } |
| 1147 | |
Jason Wang | be121f4 | 2014-01-16 14:45:24 +0800 | [diff] [blame] | 1148 | if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1149 | if (!try_fill_recv(vi, rq, GFP_ATOMIC)) |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 1150 | schedule_delayed_work(&vi->refill, 0); |
Rusty Russell | 3161e45 | 2009-08-26 12:22:32 -0700 | [diff] [blame] | 1151 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1152 | |
Jason Wang | 61845d2 | 2017-02-17 11:33:09 +0800 | [diff] [blame] | 1153 | u64_stats_update_begin(&stats->rx_syncp); |
| 1154 | stats->rx_bytes += bytes; |
| 1155 | stats->rx_packets += received; |
| 1156 | u64_stats_update_end(&stats->rx_syncp); |
| 1157 | |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1158 | return received; |
| 1159 | } |
| 1160 | |
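| | /* Reclaim completed skbs from the send virtqueue and credit them to the |
| | * per-cpu tx statistics. Cheap to call speculatively: it bails out |
| | * early when nothing has completed. |
| | */ |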
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1161 | static void free_old_xmit_skbs(struct send_queue *sq) |
| 1162 | { |
| 1163 | struct sk_buff *skb; |
| 1164 | unsigned int len; |
| 1165 | struct virtnet_info *vi = sq->vq->vdev->priv; |
| 1166 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
| 1167 | unsigned int packets = 0; |
| 1168 | unsigned int bytes = 0; |
| 1169 | |
| 1170 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
| 1171 | pr_debug("Sent skb %p\n", skb); |
| 1172 | |
| 1173 | bytes += skb->len; |
| 1174 | packets++; |
| 1175 | |
Eric Dumazet | dadc073 | 2017-08-24 09:02:49 -0700 | [diff] [blame] | 1176 | dev_consume_skb_any(skb); |
Willem de Bruijn | ea7735d | 2017-04-24 13:49:28 -0400 | [diff] [blame] | 1177 | } |
| 1178 | |
| 1179 | /* Avoid overhead when no packets have been processed; this |
| 1180 | * happens when called speculatively from start_xmit. |
| 1181 | */ |
| 1182 | if (!packets) |
| 1183 | return; |
| 1184 | |
| 1185 | u64_stats_update_begin(&stats->tx_syncp); |
| 1186 | stats->tx_bytes += bytes; |
| 1187 | stats->tx_packets += packets; |
| 1188 | u64_stats_update_end(&stats->tx_syncp); |
| 1189 | } |
| 1190 | |
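| | /* With tx NAPI enabled, opportunistically clean the matching send queue |
| | * from the receive NAPI handler and wake it if enough descriptors have |
| | * been freed for a worst-case skb. |
| | */ |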
Willem de Bruijn | 7b0411e | 2017-04-24 13:49:29 -0400 | [diff] [blame] | 1191 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
| 1192 | { |
| 1193 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1194 | unsigned int index = vq2rxq(rq->vq); |
| 1195 | struct send_queue *sq = &vi->sq[index]; |
| 1196 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); |
| 1197 | |
| 1198 | if (!sq->napi.weight) |
| 1199 | return; |
| 1200 | |
| 1201 | if (__netif_tx_trylock(txq)) { |
| 1202 | free_old_xmit_skbs(sq); |
| 1203 | __netif_tx_unlock(txq); |
| 1204 | } |
| 1205 | |
| 1206 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
| 1207 | netif_tx_wake_queue(txq); |
| 1208 | } |
| 1209 | |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1210 | static int virtnet_poll(struct napi_struct *napi, int budget) |
| 1211 | { |
| 1212 | struct receive_queue *rq = |
| 1213 | container_of(napi, struct receive_queue, napi); |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1214 | unsigned int received; |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1215 | bool xdp_xmit = false; |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1216 | |
Willem de Bruijn | 7b0411e | 2017-04-24 13:49:29 -0400 | [diff] [blame] | 1217 | virtnet_poll_cleantx(rq); |
| 1218 | |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1219 | received = virtnet_receive(rq, budget, &xdp_xmit); |
Jason Wang | 2ffa759 | 2014-07-23 16:33:54 +0800 | [diff] [blame] | 1220 | |
Rusty Russell | 8329d98 | 2007-11-19 11:20:43 -0500 | [diff] [blame] | 1221 | /* Out of packets? */ |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1222 | if (received < budget) |
| 1223 | virtqueue_napi_complete(napi, rq->vq, received); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1224 | |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 1225 | if (xdp_xmit) |
| 1226 | xdp_do_flush_map(); |
| 1227 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1228 | return received; |
| 1229 | } |
| 1230 | |
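| | /* ndo_open: prime the receive rings (falling back to refill_work on |
| | * OOM), register XDP rxq info and enable rx/tx NAPI on each queue pair. |
| | */ |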
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1231 | static int virtnet_open(struct net_device *dev) |
| 1232 | { |
| 1233 | struct virtnet_info *vi = netdev_priv(dev); |
Jesper Dangaard Brouer | 754b8a2 | 2018-01-03 11:26:04 +0100 | [diff] [blame] | 1234 | int i, err; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1235 | |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 1236 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1237 | if (i < vi->curr_queue_pairs) |
| 1238 | /* Make sure we have some buffers: if OOM, use the workqueue. */ |
Michael S. Tsirkin | 946fa56 | 2014-10-24 00:12:10 +0300 | [diff] [blame] | 1239 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
Jason Wang | e416662 | 2013-05-21 20:03:58 +0000 | [diff] [blame] | 1240 | schedule_delayed_work(&vi->refill, 0); |
Jesper Dangaard Brouer | 754b8a2 | 2018-01-03 11:26:04 +0100 | [diff] [blame] | 1241 | |
| 1242 | err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); |
| 1243 | if (err < 0) |
| 1244 | return err; |
| 1245 | |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1246 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1247 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1248 | } |
| 1249 | |
| 1250 | return 0; |
| 1251 | } |
| 1252 | |
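| | /* Tx NAPI handler: reclaim completed skbs under the tx queue lock and |
| | * wake the queue once 2 + MAX_SKB_FRAGS descriptors - enough for a |
| | * worst-case skb - are free again. |
| | */ |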
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1253 | static int virtnet_poll_tx(struct napi_struct *napi, int budget) |
| 1254 | { |
| 1255 | struct send_queue *sq = container_of(napi, struct send_queue, napi); |
| 1256 | struct virtnet_info *vi = sq->vq->vdev->priv; |
| 1257 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); |
| 1258 | |
| 1259 | __netif_tx_lock(txq, raw_smp_processor_id()); |
| 1260 | free_old_xmit_skbs(sq); |
| 1261 | __netif_tx_unlock(txq); |
| 1262 | |
| 1263 | virtqueue_napi_complete(napi, sq->vq, 0); |
| 1264 | |
| 1265 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
| 1266 | netif_tx_wake_queue(txq); |
| 1267 | |
| 1268 | return 0; |
| 1269 | } |
| 1270 | |
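| | /* Map an skb onto the send virtqueue. When the device tolerates any |
| | * header layout (any_header_sg) and the skb has room, the virtio-net |
| | * header is pushed into the headroom so it shares the first sg entry |
| | * with the linear data; otherwise it gets its own sg entry. |
| | */ |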
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1271 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1272 | { |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1273 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1274 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1275 | struct virtnet_info *vi = sq->vq->vdev->priv; |
Jason A. Donenfeld | e2fcad5 | 2017-06-04 04:16:26 +0200 | [diff] [blame] | 1276 | int num_sg; |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1277 | unsigned hdr_len = vi->hdr_len; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1278 | bool can_push; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1279 | |
Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 1280 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1281 | |
| 1282 | can_push = vi->any_header_sg && |
| 1283 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && |
| 1284 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; |
| 1285 | /* Even if we can, don't push here yet as this would skew |
| 1286 | * csum_start offset below. */ |
| 1287 | if (can_push) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1288 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1289 | else |
| 1290 | hdr = skb_vnet_hdr(skb); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1291 | |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1292 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
Jason Wang | 6391a44 | 2017-01-20 14:32:42 +0800 | [diff] [blame] | 1293 | virtio_is_little_endian(vi->vdev), false)) |
Mike Rapoport | e858fae | 2016-06-08 16:09:21 +0300 | [diff] [blame] | 1294 | BUG(); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1295 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1296 | if (vi->mergeable_rx_bufs) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 1297 | hdr->num_buffers = 0; |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 1298 | |
Jason Wang | 547c890 | 2015-08-27 14:53:06 +0800 | [diff] [blame] | 1299 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1300 | if (can_push) { |
| 1301 | __skb_push(skb, hdr_len); |
| 1302 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
Jason A. Donenfeld | e2fcad5 | 2017-06-04 04:16:26 +0200 | [diff] [blame] | 1303 | if (unlikely(num_sg < 0)) |
| 1304 | return num_sg; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1305 | /* Pull header back to avoid skew in tx bytes calculations. */ |
| 1306 | __skb_pull(skb, hdr_len); |
| 1307 | } else { |
| 1308 | sg_set_buf(sq->sg, hdr, hdr_len); |
Jason A. Donenfeld | e2fcad5 | 2017-06-04 04:16:26 +0200 | [diff] [blame] | 1309 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); |
| 1310 | if (unlikely(num_sg < 0)) |
| 1311 | return num_sg; |
| 1312 | num_sg++; |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 1313 | } |
Rusty Russell | 9dc7b9e | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1314 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
Rusty Russell | 11a3a15 | 2008-05-26 17:48:13 +1000 | [diff] [blame] | 1315 | } |
| 1316 | |
Stephen Hemminger | 424efe9 | 2009-08-31 19:50:51 +0000 | [diff] [blame] | 1317 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1318 | { |
| 1319 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1320 | int qnum = skb_get_queue_mapping(skb); |
| 1321 | struct send_queue *sq = &vi->sq[qnum]; |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1322 | int err; |
Michael S. Tsirkin | 4b7fd2e6 | 2014-10-15 16:23:28 +0300 | [diff] [blame] | 1323 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
| 1324 | bool kick = !skb->xmit_more; |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1325 | bool use_napi = sq->napi.weight; |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1326 | |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1327 | /* Free up any pending old buffers before queueing new ones. */ |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1328 | free_old_xmit_skbs(sq); |
Rusty Russell | 2cb9c6b | 2008-02-04 23:50:07 -0500 | [diff] [blame] | 1329 | |
Willem de Bruijn | bdb12e0 | 2017-04-24 13:49:30 -0400 | [diff] [blame] | 1330 | if (use_napi && kick) |
| 1331 | virtqueue_enable_cb_delayed(sq->vq); |
| 1332 | |
Jacob Keller | 074c358 | 2014-06-25 02:37:13 +0000 | [diff] [blame] | 1333 | /* timestamp packet in software */ |
| 1334 | skb_tx_timestamp(skb); |
| 1335 | |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 1336 | /* Try to transmit */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1337 | err = xmit_skb(sq, skb); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1338 | |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1339 | /* This should not happen! */ |
Jason Wang | 681daee2 | 2014-03-26 13:03:00 +0800 | [diff] [blame] | 1340 | if (unlikely(err)) { |
Rusty Russell | 9ed4cb0 | 2012-10-16 23:56:14 +1030 | [diff] [blame] | 1341 | dev->stats.tx_fifo_errors++; |
| 1342 | if (net_ratelimit()) |
| 1343 | dev_warn(&dev->dev, |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1344 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 1345 | dev->stats.tx_dropped++; |
Eric W. Biederman | 85e9452 | 2014-03-15 18:43:33 -0700 | [diff] [blame] | 1346 | dev_kfree_skb_any(skb); |
Rusty Russell | 58eba97d | 2010-07-02 16:34:01 +0000 | [diff] [blame] | 1347 | return NETDEV_TX_OK; |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1348 | } |
Michael S. Tsirkin | 03f191b | 2009-10-28 04:03:38 -0700 | [diff] [blame] | 1349 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1350 | /* Don't wait for transmitted skbs to be freed. */ |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1351 | if (!use_napi) { |
| 1352 | skb_orphan(skb); |
| 1353 | nf_reset(skb); |
| 1354 | } |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1355 | |
Michael S. Tsirkin | 60302ff | 2015-04-02 13:05:47 +0200 | [diff] [blame] | 1356 | /* If running out of space, stop queue to avoid getting packets that we |
| 1357 | * are then unable to transmit. |
| 1358 | * An alternative would be to force queuing layer to requeue the skb by |
| 1359 | * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be |
| 1360 | * returned in a normal path of operation: it means that driver is not |
| 1361 | * maintaining the TX queue stop/start state properly, and causes |
| 1362 | * the stack to do a non-trivial amount of useless work. |
| 1363 | * Since most packets only take 1 or 2 ring slots, stopping the queue |
| 1364 | * early means 16 slots are typically wasted. |
stephen hemminger | d631b94 | 2015-03-24 16:22:07 -0700 | [diff] [blame] | 1365 | */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1366 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1367 | netif_stop_subqueue(dev, qnum); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1368 | if (!use_napi && |
| 1369 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1370 | /* More just got used, free them then recheck. */ |
Linus Torvalds | b7dfde9 | 2012-12-20 08:37:04 -0800 | [diff] [blame] | 1371 | free_old_xmit_skbs(sq); |
| 1372 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1373 | netif_start_subqueue(dev, qnum); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 1374 | virtqueue_disable_cb(sq->vq); |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1375 | } |
| 1376 | } |
Rusty Russell | 99ffc69 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 1377 | } |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1378 | |
Michael S. Tsirkin | 4b7fd2e6 | 2014-10-15 16:23:28 +0300 | [diff] [blame] | 1379 | if (kick || netif_xmit_stopped(txq)) |
David S. Miller | 0b725a2 | 2014-08-25 15:51:53 -0700 | [diff] [blame] | 1380 | virtqueue_kick(sq->vq); |
| 1381 | |
Rusty Russell | 48925e3 | 2009-09-24 09:59:20 -0600 | [diff] [blame] | 1382 | return NETDEV_TX_OK; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1383 | } |
| 1384 | |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1385 | /* |
| 1386 | * Send command via the control virtqueue and check status. Commands |
| 1387 | * supported by the hypervisor, as indicated by feature bits, should |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1388 | * never fail unless improperly formatted. |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1389 | */ |
| 1390 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1391 | struct scatterlist *out) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1392 | { |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1393 | struct scatterlist *sgs[4], hdr, stat; |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1394 | unsigned out_num = 0, tmp; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1395 | |
| 1396 | /* Caller should know better */ |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1397 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1398 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1399 | vi->ctrl_status = ~0; |
| 1400 | vi->ctrl_hdr.class = class; |
| 1401 | vi->ctrl_hdr.cmd = cmd; |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1402 | /* Add header */ |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1403 | sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1404 | sgs[out_num++] = &hdr; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1405 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1406 | if (out) |
| 1407 | sgs[out_num++] = out; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1408 | |
Rusty Russell | f7bc959 | 2013-03-20 15:44:28 +1030 | [diff] [blame] | 1409 | /* Add return status. */ |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1410 | sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1411 | sgs[out_num] = &stat; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1412 | |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1413 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
Rusty Russell | a7c5814 | 2014-03-13 11:23:39 +1030 | [diff] [blame] | 1414 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1415 | |
Heinz Graalfs | 6797590 | 2013-10-29 09:40:02 +1030 | [diff] [blame] | 1416 | if (unlikely(!virtqueue_kick(vi->cvq))) |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1417 | return vi->ctrl_status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1418 | |
| 1419 | /* Spin for a response; the kick causes an ioport write, trapping |
| 1420 | * into the hypervisor, so the request should be handled immediately. |
| 1421 | */ |
Heinz Graalfs | 047b9b9 | 2013-10-29 09:40:47 +1030 | [diff] [blame] | 1422 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
| 1423 | !virtqueue_is_broken(vi->cvq)) |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1424 | cpu_relax(); |
| 1425 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1426 | return vi->ctrl_status == VIRTIO_NET_OK; |
Amos Kong | 40cbfc3 | 2013-01-21 01:17:21 +0000 | [diff] [blame] | 1427 | } |
| 1428 | |
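| | /* ndo_set_mac_address: prefer the VIRTIO_NET_CTRL_MAC_ADDR_SET control |
| | * command; on legacy devices fall back to writing the config space one |
| | * byte at a time, which is inherently non-atomic. |
| | */ |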
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1429 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
| 1430 | { |
| 1431 | struct virtnet_info *vi = netdev_priv(dev); |
| 1432 | struct virtio_device *vdev = vi->vdev; |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1433 | int ret; |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1434 | struct sockaddr *addr; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1435 | struct scatterlist sg; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1436 | |
Shyam Saini | 801822d | 2016-12-24 00:44:58 +0530 | [diff] [blame] | 1437 | addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1438 | if (!addr) |
| 1439 | return -ENOMEM; |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1440 | |
| 1441 | ret = eth_prepare_mac_addr_change(dev, addr); |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 1442 | if (ret) |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1443 | goto out; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1444 | |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1445 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
| 1446 | sg_init_one(&sg, addr->sa_data, dev->addr_len); |
| 1447 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1448 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1449 | dev_warn(&vdev->dev, |
| 1450 | "Failed to set mac address by vq command.\n"); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1451 | ret = -EINVAL; |
| 1452 | goto out; |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1453 | } |
Michael S. Tsirkin | 7e93a02 | 2014-11-26 15:58:28 +0200 | [diff] [blame] | 1454 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
| 1455 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 1456 | unsigned int i; |
| 1457 | |
| 1458 | /* Naturally, this has an atomicity problem. */ |
| 1459 | for (i = 0; i < dev->addr_len; i++) |
| 1460 | virtio_cwrite8(vdev, |
| 1461 | offsetof(struct virtio_net_config, mac) + |
| 1462 | i, addr->sa_data[i]); |
Amos Kong | 7e58d5a | 2013-01-21 01:17:23 +0000 | [diff] [blame] | 1463 | } |
| 1464 | |
| 1465 | eth_commit_mac_addr_change(dev, p); |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1466 | ret = 0; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1467 | |
Andy Lutomirski | e37e2ff | 2016-12-05 18:10:58 -0800 | [diff] [blame] | 1468 | out: |
| 1469 | kfree(addr); |
| 1470 | return ret; |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 1471 | } |
| 1472 | |
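| | /* ndo_get_stats64: sum the per-cpu rx/tx counters under their u64_stats |
| | * sequence counters for consistent snapshots, then fold in the error |
| | * counters kept in dev->stats. |
| | */ |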
stephen hemminger | bc1f447 | 2017-01-06 19:12:52 -0800 | [diff] [blame] | 1473 | static void virtnet_stats(struct net_device *dev, |
| 1474 | struct rtnl_link_stats64 *tot) |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1475 | { |
| 1476 | struct virtnet_info *vi = netdev_priv(dev); |
| 1477 | int cpu; |
| 1478 | unsigned int start; |
| 1479 | |
| 1480 | for_each_possible_cpu(cpu) { |
Eric Dumazet | 58472a7 | 2012-02-13 06:53:41 +0000 | [diff] [blame] | 1481 | struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1482 | u64 tpackets, tbytes, rpackets, rbytes; |
| 1483 | |
| 1484 | do { |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1485 | start = u64_stats_fetch_begin_irq(&stats->tx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1486 | tpackets = stats->tx_packets; |
| 1487 | tbytes = stats->tx_bytes; |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1488 | } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); |
Eric Dumazet | 83a2705 | 2012-06-05 22:35:24 +0000 | [diff] [blame] | 1489 | |
| 1490 | do { |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1491 | start = u64_stats_fetch_begin_irq(&stats->rx_syncp); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1492 | rpackets = stats->rx_packets; |
| 1493 | rbytes = stats->rx_bytes; |
Eric W. Biederman | 57a7744 | 2014-03-13 21:26:42 -0700 | [diff] [blame] | 1494 | } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1495 | |
| 1496 | tot->rx_packets += rpackets; |
| 1497 | tot->tx_packets += tpackets; |
| 1498 | tot->rx_bytes += rbytes; |
| 1499 | tot->tx_bytes += tbytes; |
| 1500 | } |
| 1501 | |
| 1502 | tot->tx_dropped = dev->stats.tx_dropped; |
Rick Jones | 021ac8d | 2011-11-21 09:28:17 +0000 | [diff] [blame] | 1503 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1504 | tot->rx_dropped = dev->stats.rx_dropped; |
| 1505 | tot->rx_length_errors = dev->stats.rx_length_errors; |
| 1506 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 1507 | } |
| 1508 | |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1509 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1510 | static void virtnet_netpoll(struct net_device *dev) |
| 1511 | { |
| 1512 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1513 | int i; |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1514 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1515 | for (i = 0; i < vi->curr_queue_pairs; i++) |
| 1516 | napi_schedule(&vi->rq[i].napi); |
Amit Shah | da74e89 | 2008-02-29 16:24:50 +0530 | [diff] [blame] | 1517 | } |
| 1518 | #endif |
| 1519 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1520 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 1521 | { |
| 1522 | rtnl_lock(); |
| 1523 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1524 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 1525 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
| 1526 | rtnl_unlock(); |
| 1527 | } |
| 1528 | |
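| | /* Tell the device how many queue pairs to use via VIRTIO_NET_CTRL_MQ; |
| | * callers are expected to hold the rtnl lock (virtnet_set_queues() |
| | * below is the locking wrapper). |
| | */ |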
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 1529 | static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1530 | { |
| 1531 | struct scatterlist sg; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1532 | struct net_device *dev = vi->dev; |
| 1533 | |
| 1534 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 1535 | return 0; |
| 1536 | |
Andy Lutomirski | a725ee3 | 2016-07-18 15:34:49 -0700 | [diff] [blame] | 1537 | vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
| 1538 | sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1539 | |
| 1540 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1541 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1542 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", |
| 1543 | queue_pairs); |
| 1544 | return -EINVAL; |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1545 | } else { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1546 | vi->curr_queue_pairs = queue_pairs; |
Jason Wang | 35ed159 | 2013-10-15 11:18:59 +0800 | [diff] [blame] | 1547 | /* virtnet_open() will refill when the device is brought up. */ |
| 1548 | if (dev->flags & IFF_UP) |
| 1549 | schedule_delayed_work(&vi->refill, 0); |
Sasha Levin | 55257d7 | 2013-04-29 12:00:08 +0930 | [diff] [blame] | 1550 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1551 | |
| 1552 | return 0; |
| 1553 | } |
| 1554 | |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 1555 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 1556 | { |
| 1557 | int err; |
| 1558 | |
| 1559 | rtnl_lock(); |
| 1560 | err = _virtnet_set_queues(vi, queue_pairs); |
| 1561 | rtnl_unlock(); |
| 1562 | return err; |
| 1563 | } |
| 1564 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1565 | static int virtnet_close(struct net_device *dev) |
| 1566 | { |
| 1567 | struct virtnet_info *vi = netdev_priv(dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1568 | int i; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1569 | |
Rusty Russell | b2baed6 | 2011-12-29 00:42:38 +0000 | [diff] [blame] | 1570 | /* Make sure refill_work doesn't re-enable napi! */ |
| 1571 | cancel_delayed_work_sync(&vi->refill); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1572 | |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1573 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Jesper Dangaard Brouer | 754b8a2 | 2018-01-03 11:26:04 +0100 | [diff] [blame] | 1574 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1575 | napi_disable(&vi->rq[i].napi); |
Willem de Bruijn | 78a57b4 | 2017-04-25 15:59:17 -0400 | [diff] [blame] | 1576 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1577 | } |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1578 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 1579 | return 0; |
| 1580 | } |
| 1581 | |
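| | /* ndo_set_rx_mode: push the promisc/allmulti flags and the unicast and |
| | * multicast filter lists to the device over the control virtqueue. May |
| | * be called in atomic context, hence the GFP_ATOMIC allocation. |
| | */ |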
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1582 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 1583 | { |
| 1584 | struct virtnet_info *vi = netdev_priv(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1585 | struct scatterlist sg[2]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1586 | struct virtio_net_ctrl_mac *mac_data; |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1587 | struct netdev_hw_addr *ha; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1588 | int uc_count; |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1589 | int mc_count; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1590 | void *buf; |
| 1591 | int i; |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1592 | |
stephen hemminger | 788a8b6 | 2013-12-09 16:18:45 -0800 | [diff] [blame] | 1593 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1594 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 1595 | return; |
| 1596 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1597 | vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 1598 | vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1599 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1600 | sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1601 | |
| 1602 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1603 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1604 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1605 | vi->ctrl_promisc ? "en" : "dis"); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1606 | |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1607 | sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1608 | |
| 1609 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1610 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1611 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
Michael S. Tsirkin | 2ac4603 | 2015-11-15 15:11:00 +0200 | [diff] [blame] | 1612 | vi->ctrl_allmulti ? "en" : "dis"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1613 | |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1614 | uc_count = netdev_uc_count(dev); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1615 | mc_count = netdev_mc_count(dev); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1616 | /* MAC filter - use one buffer for both lists */ |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1617 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 1618 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 1619 | mac_data = buf; |
Joe Perches | e68ed8f | 2013-02-03 17:28:15 +0000 | [diff] [blame] | 1620 | if (!buf) |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1621 | return; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1622 | |
Alex Williamson | 23e258e | 2009-05-01 17:27:56 +0000 | [diff] [blame] | 1623 | sg_init_table(sg, 2); |
| 1624 | |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1625 | /* Store the unicast list and count in the front of the buffer */ |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 1626 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1627 | i = 0; |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1628 | netdev_for_each_uc_addr(ha, dev) |
Jiri Pirko | ccffad25 | 2009-05-22 23:22:17 +0000 | [diff] [blame] | 1629 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1630 | |
| 1631 | sg_set_buf(&sg[0], mac_data, |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1632 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1633 | |
| 1634 | /* multicast list and count fill the end */ |
Jiri Pirko | 32e7bfc | 2010-01-25 13:36:10 -0800 | [diff] [blame] | 1635 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1636 | |
Michael S. Tsirkin | fdd819b | 2014-10-07 16:39:48 +0200 | [diff] [blame] | 1637 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
Jiri Pirko | 567ec87 | 2010-02-23 23:17:07 +0000 | [diff] [blame] | 1638 | i = 0; |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1639 | netdev_for_each_mc_addr(ha, dev) |
| 1640 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1641 | |
| 1642 | sg_set_buf(&sg[1], mac_data, |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1643 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1644 | |
| 1645 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1646 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
Thomas Huth | 99e872a | 2013-11-29 10:02:19 +0100 | [diff] [blame] | 1647 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
Alex Williamson | f565a7c | 2009-02-04 09:02:45 +0000 | [diff] [blame] | 1648 | |
| 1649 | kfree(buf); |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 1650 | } |
| 1651 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1652 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
| 1653 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1654 | { |
| 1655 | struct virtnet_info *vi = netdev_priv(dev); |
| 1656 | struct scatterlist sg; |
| 1657 | |
Andy Lutomirski | a725ee3 | 2016-07-18 15:34:49 -0700 | [diff] [blame] | 1658 | vi->ctrl_vid = vid; |
| 1659 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1660 | |
| 1661 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1662 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1663 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1664 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1665 | } |
| 1666 | |
Patrick McHardy | 80d5c36 | 2013-04-19 02:04:28 +0000 | [diff] [blame] | 1667 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
| 1668 | __be16 proto, u16 vid) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1669 | { |
| 1670 | struct virtnet_info *vi = netdev_priv(dev); |
| 1671 | struct scatterlist sg; |
| 1672 | |
Andy Lutomirski | a725ee3 | 2016-07-18 15:34:49 -0700 | [diff] [blame] | 1673 | vi->ctrl_vid = vid; |
| 1674 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1675 | |
| 1676 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
stephen hemminger | d24bae3 | 2013-12-09 16:17:40 -0800 | [diff] [blame] | 1677 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1678 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
Jiri Pirko | 8e58613 | 2011-12-08 19:52:37 -0500 | [diff] [blame] | 1679 | return 0; |
Alex Williamson | 0bde9569 | 2009-02-04 09:02:50 +0000 | [diff] [blame] | 1680 | } |
| 1681 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1682 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1683 | { |
| 1684 | int i; |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1685 | |
| 1686 | if (vi->affinity_hint_set) { |
| 1687 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1688 | virtqueue_set_affinity(vi->rq[i].vq, -1); |
| 1689 | virtqueue_set_affinity(vi->sq[i].vq, -1); |
| 1690 | } |
| 1691 | |
| 1692 | vi->affinity_hint_set = false; |
| 1693 | } |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1694 | } |
| 1695 | |
| 1696 | static void virtnet_set_affinity(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1697 | { |
| 1698 | int i; |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1699 | int cpu; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1700 | |
| 1701 | /* In multiqueue mode, when the number of CPUs equals the number of
| 1702 | * queue pairs, make each queue pair private to one CPU by setting
| 1703 | * the affinity hint, eliminating contention.
| 1704 | */
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1705 | if (vi->curr_queue_pairs == 1 || |
| 1706 | vi->max_queue_pairs != num_online_cpus()) { |
| 1707 | virtnet_clean_affinity(vi, -1); |
| 1708 | return; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1709 | } |
| 1710 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1711 | i = 0; |
| 1712 | for_each_online_cpu(cpu) { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1713 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
| 1714 | virtqueue_set_affinity(vi->sq[i].vq, cpu); |
Jason Wang | 9bb8ca8 | 2013-11-05 18:19:45 +0800 | [diff] [blame] | 1715 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1716 | i++; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1717 | } |
| 1718 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1719 | vi->affinity_hint_set = true; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1720 | } |
| 1721 | |
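| | /* CPU hotplug callbacks: when a CPU comes online, or a CPU going down
| |  * has been cleaned up, re-spread the per-queue affinity hints across
| |  * the CPUs that remain online.
| |  */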
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1722 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1723 | { |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1724 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1725 | node); |
| 1726 | virtnet_set_affinity(vi); |
| 1727 | return 0; |
| 1728 | } |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 1729 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1730 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
| 1731 | { |
| 1732 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1733 | node_dead); |
| 1734 | virtnet_set_affinity(vi); |
| 1735 | return 0; |
| 1736 | } |
Jason Wang | 3ab098d | 2013-10-15 11:18:58 +0800 | [diff] [blame] | 1737 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 1738 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
| 1739 | { |
| 1740 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 1741 | node); |
| 1742 | |
| 1743 | virtnet_clean_affinity(vi, cpu); |
| 1744 | return 0; |
| 1745 | } |
| 1746 | |
| 1747 | static enum cpuhp_state virtionet_online; |
| 1748 | |
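| | /* Register this device on both hotplug states: the dynamic ONLINE state
| |  * (virtionet_online, set up at module init) and CPUHP_VIRT_NET_DEAD.
| |  * If the second registration fails, unwind the first so no instance is
| |  * leaked.
| |  */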
| 1749 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) |
| 1750 | { |
| 1751 | int ret; |
| 1752 | |
| 1753 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); |
| 1754 | if (ret) |
| 1755 | return ret; |
| 1756 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 1757 | &vi->node_dead); |
| 1758 | if (!ret) |
| 1759 | return ret; |
| 1760 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 1761 | return ret; |
| 1762 | } |
| 1763 | |
| 1764 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) |
| 1765 | { |
| 1766 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 1767 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 1768 | &vi->node_dead); |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1769 | } |
| 1770 | |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1771 | static void virtnet_get_ringparam(struct net_device *dev, |
| 1772 | struct ethtool_ringparam *ring) |
| 1773 | { |
| 1774 | struct virtnet_info *vi = netdev_priv(dev); |
| 1775 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 1776 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
| 1777 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1778 | ring->rx_pending = ring->rx_max_pending; |
| 1779 | ring->tx_pending = ring->tx_max_pending; |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1780 | } |
| 1781 | |
| 1783 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 1784 | struct ethtool_drvinfo *info) |
| 1785 | { |
| 1786 | struct virtnet_info *vi = netdev_priv(dev); |
| 1787 | struct virtio_device *vdev = vi->vdev; |
| 1788 | |
| 1789 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 1790 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 1791 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 1793 | } |
| 1794 | |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1795 | /* TODO: Eliminate OOO packets during switching */ |
| 1796 | static int virtnet_set_channels(struct net_device *dev, |
| 1797 | struct ethtool_channels *channels) |
| 1798 | { |
| 1799 | struct virtnet_info *vi = netdev_priv(dev); |
| 1800 | u16 queue_pairs = channels->combined_count; |
| 1801 | int err; |
| 1802 | |
| 1803 | /* We don't support separate rx/tx channels. |
| 1804 | * We don't allow setting 'other' channels. |
| 1805 | */ |
| 1806 | if (channels->rx_count || channels->tx_count || channels->other_count) |
| 1807 | return -EINVAL; |
| 1808 | |
Amos Kong | c18e9cd | 2014-04-18 13:45:41 +0800 | [diff] [blame] | 1809 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1810 | return -EINVAL; |
| 1811 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 1812 | /* For now we don't support modifying channels while XDP is loaded.
| 1813 | * Also, when XDP is loaded all RX queues have XDP programs, so we
| 1814 | * only need to check a single RX queue.
| 1815 | */
| 1816 | if (vi->rq[0].xdp_prog) |
| 1817 | return -EINVAL; |
| 1818 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1819 | get_online_cpus(); |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 1820 | err = _virtnet_set_queues(vi, queue_pairs); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1821 | if (!err) { |
| 1822 | netif_set_real_num_tx_queues(dev, queue_pairs); |
| 1823 | netif_set_real_num_rx_queues(dev, queue_pairs); |
| 1824 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 1825 | virtnet_set_affinity(vi); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1826 | } |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 1827 | put_online_cpus(); |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1828 | |
| 1829 | return err; |
| 1830 | } |
| 1831 | |
| 1832 | static void virtnet_get_channels(struct net_device *dev, |
| 1833 | struct ethtool_channels *channels) |
| 1834 | { |
| 1835 | struct virtnet_info *vi = netdev_priv(dev); |
| 1836 | |
| 1837 | channels->combined_count = vi->curr_queue_pairs; |
| 1838 | channels->max_combined = vi->max_queue_pairs; |
| 1839 | channels->max_other = 0; |
| 1840 | channels->rx_count = 0; |
| 1841 | channels->tx_count = 0; |
| 1842 | channels->other_count = 0; |
| 1843 | } |
| 1844 | |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1845 | /* Check if the user is trying to change anything besides speed/duplex */ |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1846 | static bool |
| 1847 | virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1848 | { |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1849 | struct ethtool_link_ksettings diff1 = *cmd; |
| 1850 | struct ethtool_link_ksettings diff2 = {}; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1851 | |
Nikolay Aleksandrov | 0cf3ace | 2016-02-07 21:52:24 +0100 | [diff] [blame] | 1852 | /* cmd is always set, so we need to clear it; validate the port type,
| 1853 | * and since there is no autonegotiation we can ignore advertising.
| | * Everything left in diff1 must then match the all-zero diff2.
| 1854 | */
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1855 | diff1.base.speed = 0; |
| 1856 | diff2.base.port = PORT_OTHER; |
| 1857 | ethtool_link_ksettings_zero_link_mode(&diff1, advertising); |
| 1858 | diff1.base.duplex = 0; |
| 1859 | diff1.base.cmd = 0; |
| 1860 | diff1.base.link_mode_masks_nwords = 0; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1861 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1862 | return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && |
| 1863 | bitmap_empty(diff1.link_modes.supported, |
| 1864 | __ETHTOOL_LINK_MODE_MASK_NBITS) && |
| 1865 | bitmap_empty(diff1.link_modes.advertising, |
| 1866 | __ETHTOOL_LINK_MODE_MASK_NBITS) && |
| 1867 | bitmap_empty(diff1.link_modes.lp_advertising, |
| 1868 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1869 | } |
| 1870 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1871 | static int virtnet_set_link_ksettings(struct net_device *dev, |
| 1872 | const struct ethtool_link_ksettings *cmd) |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1873 | { |
| 1874 | struct virtnet_info *vi = netdev_priv(dev); |
| 1875 | u32 speed; |
| 1876 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1877 | speed = cmd->base.speed; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1878 | /* don't allow custom speed and duplex */ |
| 1879 | if (!ethtool_validate_speed(speed) || |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1880 | !ethtool_validate_duplex(cmd->base.duplex) || |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1881 | !virtnet_validate_ethtool_cmd(cmd)) |
| 1882 | return -EINVAL; |
| 1883 | vi->speed = speed; |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1884 | vi->duplex = cmd->base.duplex; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1885 | |
| 1886 | return 0; |
| 1887 | } |
| 1888 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1889 | static int virtnet_get_link_ksettings(struct net_device *dev, |
| 1890 | struct ethtool_link_ksettings *cmd) |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1891 | { |
| 1892 | struct virtnet_info *vi = netdev_priv(dev); |
| 1893 | |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1894 | cmd->base.speed = vi->speed; |
| 1895 | cmd->base.duplex = vi->duplex; |
| 1896 | cmd->base.port = PORT_OTHER; |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 1897 | |
| 1898 | return 0; |
| 1899 | } |
| 1900 | |
| 1901 | static void virtnet_init_settings(struct net_device *dev) |
| 1902 | { |
| 1903 | struct virtnet_info *vi = netdev_priv(dev); |
| 1904 | |
| 1905 | vi->speed = SPEED_UNKNOWN; |
| 1906 | vi->duplex = DUPLEX_UNKNOWN; |
| 1907 | } |
| 1908 | |
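| | /* Refresh speed/duplex from config space; only meaningful when the
| |  * device offers VIRTIO_NET_F_SPEED_DUPLEX, otherwise the values from
| |  * virtnet_init_settings() (or set via ethtool) are left untouched.
| |  */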
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 1909 | static void virtnet_update_settings(struct virtnet_info *vi) |
| 1910 | { |
| 1911 | u32 speed; |
| 1912 | u8 duplex; |
| 1913 | |
| 1914 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) |
| 1915 | return; |
| 1916 | |
| 1917 | speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, |
| 1918 | speed)); |
| 1919 | if (ethtool_validate_speed(speed)) |
| 1920 | vi->speed = speed; |
| 1921 | duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, |
| 1922 | duplex)); |
| 1923 | if (ethtool_validate_duplex(duplex)) |
| 1924 | vi->duplex = duplex; |
| 1925 | } |
| 1926 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 1927 | static const struct ethtool_ops virtnet_ethtool_ops = { |
Rick Jones | 6684604 | 2011-11-14 14:17:08 +0000 | [diff] [blame] | 1928 | .get_drvinfo = virtnet_get_drvinfo, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 1929 | .get_link = ethtool_op_get_link, |
Rick Jones | 8f9f466 | 2011-10-19 08:10:59 +0000 | [diff] [blame] | 1930 | .get_ringparam = virtnet_get_ringparam, |
Jason Wang | d73bcd2 | 2012-12-07 07:04:57 +0000 | [diff] [blame] | 1931 | .set_channels = virtnet_set_channels, |
| 1932 | .get_channels = virtnet_get_channels, |
Jacob Keller | 074c358 | 2014-06-25 02:37:13 +0000 | [diff] [blame] | 1933 | .get_ts_info = ethtool_op_get_ts_info, |
Philippe Reynes | ebb6b4b | 2017-03-21 23:24:24 +0100 | [diff] [blame] | 1934 | .get_link_ksettings = virtnet_get_link_ksettings, |
| 1935 | .set_link_ksettings = virtnet_set_link_ksettings, |
Herbert Xu | a9ea3fc | 2008-04-18 11:21:42 +0800 | [diff] [blame] | 1936 | }; |
| 1937 | |
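| | /* Quiesce the device: stop the config work, detach the netdev and
| |  * silence NAPI so the virtqueues can be torn down safely afterwards.
| |  */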
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 1938 | static void virtnet_freeze_down(struct virtio_device *vdev) |
| 1939 | { |
| 1940 | struct virtnet_info *vi = vdev->priv; |
| 1941 | int i; |
| 1942 | |
| 1943 | /* Make sure no work handler is accessing the device */ |
| 1944 | flush_work(&vi->config_work); |
| 1945 | |
| 1946 | netif_device_detach(vi->dev); |
Jason Wang | 713a98d | 2017-06-28 09:51:03 +0800 | [diff] [blame] | 1947 | netif_tx_disable(vi->dev); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 1948 | cancel_delayed_work_sync(&vi->refill); |
| 1949 | |
| 1950 | if (netif_running(vi->dev)) { |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1951 | for (i = 0; i < vi->max_queue_pairs; i++) { |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 1952 | napi_disable(&vi->rq[i].napi); |
Willem de Bruijn | 78a57b4 | 2017-04-25 15:59:17 -0400 | [diff] [blame] | 1953 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1954 | } |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 1955 | } |
| 1956 | } |
| 1957 | |
| 1958 | static int init_vqs(struct virtnet_info *vi); |
| 1959 | |
| 1960 | static int virtnet_restore_up(struct virtio_device *vdev) |
| 1961 | { |
| 1962 | struct virtnet_info *vi = vdev->priv; |
| 1963 | int err, i; |
| 1964 | |
| 1965 | err = init_vqs(vi); |
| 1966 | if (err) |
| 1967 | return err; |
| 1968 | |
| 1969 | virtio_device_ready(vdev); |
| 1970 | |
| 1971 | if (netif_running(vi->dev)) { |
| 1972 | for (i = 0; i < vi->curr_queue_pairs; i++) |
| 1973 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
| 1974 | schedule_delayed_work(&vi->refill, 0); |
| 1975 | |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1976 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Willem de Bruijn | e4e8452 | 2017-04-24 13:49:26 -0400 | [diff] [blame] | 1977 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 1978 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 1979 | &vi->sq[i].napi); |
| 1980 | } |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 1981 | } |
| 1982 | |
| 1983 | netif_device_attach(vi->dev); |
| 1984 | return err; |
| 1985 | } |
| 1986 | |
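| | /* Guest receive offloads are toggled at run time through the control
| |  * virtqueue. The clear/restore pair below is used around XDP
| |  * attach/detach: XDP cannot parse the oversized buffers the receive
| |  * GSO offloads would deliver, so only GUEST_CSUM is kept while a
| |  * program is loaded.
| |  */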
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 1987 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
| 1988 | { |
| 1989 | struct scatterlist sg;
| | 
| 1990 | vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
| 1991 | |
| 1992 | sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); |
| 1993 | |
| 1994 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, |
| 1995 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { |
| 1996 | dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
| 1997 | return -EINVAL; |
| 1998 | } |
| 1999 | |
| 2000 | return 0; |
| 2001 | } |
| 2002 | |
| 2003 | static int virtnet_clear_guest_offloads(struct virtnet_info *vi) |
| 2004 | { |
| 2005 | u64 offloads = 0; |
| 2006 | |
| 2007 | if (!vi->guest_offloads) |
| 2008 | return 0; |
| 2009 | |
| 2010 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 2011 | offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; |
| 2012 | |
| 2013 | return virtnet_set_guest_offloads(vi, offloads); |
| 2014 | } |
| 2015 | |
| 2016 | static int virtnet_restore_guest_offloads(struct virtnet_info *vi) |
| 2017 | { |
| 2018 | u64 offloads = vi->guest_offloads; |
| 2019 | |
| 2020 | if (!vi->guest_offloads) |
| 2021 | return 0; |
| 2022 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 2023 | offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; |
| 2024 | |
| 2025 | return virtnet_set_guest_offloads(vi, offloads); |
| 2026 | } |
| 2027 | |
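| | /* Attach/replace an XDP program: validate the device configuration and
| |  * MTU, reserve one extra TX queue per CPU for XDP_TX, take a prog
| |  * reference per receive queue, then swap the per-queue prog pointers
| |  * with NAPI disabled.
| |  */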
Jakub Kicinski | 9861ce0 | 2017-04-30 21:46:48 -0700 | [diff] [blame] | 2028 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
| 2029 | struct netlink_ext_ack *extack) |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2030 | { |
| 2031 | unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); |
| 2032 | struct virtnet_info *vi = netdev_priv(dev); |
| 2033 | struct bpf_prog *old_prog; |
Jason Wang | 017b29c | 2017-02-20 11:50:20 +0800 | [diff] [blame] | 2034 | u16 xdp_qp = 0, curr_qp; |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2035 | int i, err; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2036 | |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2037 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) &&
| 2038 | (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
| 2039 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
| 2040 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
| 2041 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2042 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2043 | return -EOPNOTSUPP; |
| 2044 | } |
| 2045 | |
| 2046 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { |
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2047 | NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2048 | return -EINVAL; |
| 2049 | } |
| 2050 | |
| 2051 | if (dev->mtu > max_sz) { |
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2052 | NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2053 | netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); |
| 2054 | return -EINVAL; |
| 2055 | } |
| 2056 | |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2057 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
| 2058 | if (prog) |
| 2059 | xdp_qp = nr_cpu_ids; |
| 2060 | |
| 2061 | /* XDP requires extra queues for XDP_TX */ |
| 2062 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { |
Daniel Borkmann | 4d463c4 | 2017-05-03 00:39:17 +0200 | [diff] [blame] | 2063 | NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available"); |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2064 | netdev_warn(dev, "request %i queues but max is %i\n", |
| 2065 | curr_qp + xdp_qp, vi->max_queue_pairs); |
| 2066 | return -ENOMEM; |
| 2067 | } |
| 2068 | |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 2069 | if (prog) { |
| 2070 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); |
| 2071 | if (IS_ERR(prog)) |
| 2072 | return PTR_ERR(prog); |
| 2073 | } |
| 2074 | |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2075 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
| 2076 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2077 | napi_disable(&vi->rq[i].napi); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2078 | |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2079 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2080 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
| 2081 | if (err) |
| 2082 | goto err; |
| 2083 | vi->xdp_queue_pairs = xdp_qp; |
John Fastabend | 672aafd | 2016-12-15 12:13:49 -0800 | [diff] [blame] | 2084 | |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2085 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2086 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2087 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2088 | if (i == 0) { |
| 2089 | if (!old_prog) |
| 2090 | virtnet_clear_guest_offloads(vi); |
| 2091 | if (!prog) |
| 2092 | virtnet_restore_guest_offloads(vi); |
| 2093 | } |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2094 | if (old_prog) |
| 2095 | bpf_prog_put(old_prog); |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2096 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2097 | } |
| 2098 | |
| 2099 | return 0; |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 2100 | |
Jason Wang | 4941d47 | 2017-07-19 16:54:48 +0800 | [diff] [blame] | 2101 | err: |
| 2102 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2103 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
John Fastabend | 2de2f7f | 2017-02-02 19:16:29 -0800 | [diff] [blame] | 2104 | if (prog) |
| 2105 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); |
| 2106 | return err; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2107 | } |
| 2108 | |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2109 | static u32 virtnet_xdp_query(struct net_device *dev) |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2110 | { |
| 2111 | struct virtnet_info *vi = netdev_priv(dev); |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2112 | const struct bpf_prog *xdp_prog; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2113 | int i; |
| 2114 | |
| 2115 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2116 | xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2117 | if (xdp_prog) |
| 2118 | return xdp_prog->aux->id; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2119 | } |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2120 | return 0; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2121 | } |
| 2122 | |
Jakub Kicinski | f4e6352 | 2017-11-03 13:56:16 -0700 | [diff] [blame] | 2123 | static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2124 | { |
| 2125 | switch (xdp->command) { |
| 2126 | case XDP_SETUP_PROG: |
Jakub Kicinski | 9861ce0 | 2017-04-30 21:46:48 -0700 | [diff] [blame] | 2127 | return virtnet_xdp_set(dev, xdp->prog, xdp->extack); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2128 | case XDP_QUERY_PROG: |
Martin KaFai Lau | 5b0e662 | 2017-06-15 17:29:12 -0700 | [diff] [blame] | 2129 | xdp->prog_id = virtnet_xdp_query(dev); |
| 2130 | xdp->prog_attached = !!xdp->prog_id; |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2131 | return 0; |
| 2132 | default: |
| 2133 | return -EINVAL; |
| 2134 | } |
| 2135 | } |
| 2136 | |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2137 | static const struct net_device_ops virtnet_netdev = { |
| 2138 | .ndo_open = virtnet_open, |
| 2139 | .ndo_stop = virtnet_close, |
| 2140 | .ndo_start_xmit = start_xmit, |
| 2141 | .ndo_validate_addr = eth_validate_addr, |
Alex Williamson | 9c46f6d | 2009-02-04 16:36:34 -0800 | [diff] [blame] | 2142 | .ndo_set_mac_address = virtnet_set_mac_address, |
Alex Williamson | 2af7698 | 2009-02-04 09:02:40 +0000 | [diff] [blame] | 2143 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2144 | .ndo_get_stats64 = virtnet_stats, |
Alex Williamson | 1824a98 | 2009-05-01 17:31:10 +0000 | [diff] [blame] | 2145 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 2146 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2147 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 2148 | .ndo_poll_controller = virtnet_netpoll, |
| 2149 | #endif |
Jakub Kicinski | f4e6352 | 2017-11-03 13:56:16 -0700 | [diff] [blame] | 2150 | .ndo_bpf = virtnet_xdp, |
Jason Wang | 186b3c9 | 2017-09-19 17:42:43 +0800 | [diff] [blame] | 2151 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
| 2152 | .ndo_xdp_flush = virtnet_xdp_flush, |
Vlad Yasevich | 2836b4f | 2017-05-23 13:38:43 -0400 | [diff] [blame] | 2153 | .ndo_features_check = passthru_features_check, |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2154 | }; |
| 2155 | |
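| | /* Deferred handler for config-change interrupts: acknowledge guest
| |  * announce requests (gratuitous ARP, e.g. after migration) and track
| |  * link state changes.
| |  */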
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2156 | static void virtnet_config_changed_work(struct work_struct *work) |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2157 | { |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2158 | struct virtnet_info *vi = |
| 2159 | container_of(work, struct virtnet_info, config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2160 | u16 v; |
| 2161 | |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2162 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
| 2163 | struct virtio_net_config, status, &v) < 0) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 2164 | return; |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2165 | |
| 2166 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
Amerigo Wang | ee89bab | 2012-08-09 22:14:56 +0000 | [diff] [blame] | 2167 | netdev_notify_peers(vi->dev); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2168 | virtnet_ack_link_announce(vi); |
| 2169 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2170 | |
| 2171 | /* Ignore unknown (future) status bits */ |
| 2172 | v &= VIRTIO_NET_S_LINK_UP; |
| 2173 | |
| 2174 | if (vi->status == v) |
Michael S. Tsirkin | 507613b | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 2175 | return; |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2176 | |
| 2177 | vi->status = v; |
| 2178 | |
| 2179 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 2180 | virtnet_update_settings(vi); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2181 | netif_carrier_on(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2182 | netif_tx_wake_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2183 | } else { |
| 2184 | netif_carrier_off(vi->dev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2185 | netif_tx_stop_all_queues(vi->dev); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2186 | } |
| 2187 | } |
| 2188 | |
| 2189 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 2190 | { |
| 2191 | struct virtnet_info *vi = vdev->priv; |
| 2192 | |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 2193 | schedule_work(&vi->config_work); |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2194 | } |
| 2195 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2196 | static void virtnet_free_queues(struct virtnet_info *vi) |
| 2197 | { |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 2198 | int i; |
| 2199 | |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 2200 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2201 | napi_hash_del(&vi->rq[i].napi); |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 2202 | netif_napi_del(&vi->rq[i].napi); |
Willem de Bruijn | b92f1e6 | 2017-04-24 13:49:27 -0400 | [diff] [blame] | 2203 | netif_napi_del(&vi->sq[i].napi); |
Jason Wang | ab3971b | 2015-03-12 13:57:44 +0800 | [diff] [blame] | 2204 | } |
Andrey Vagin | d4fb84e | 2013-12-05 18:36:21 +0400 | [diff] [blame] | 2205 | |
Eric Dumazet | 963abe5 | 2016-11-15 22:24:12 -0800 | [diff] [blame] | 2206 | /* We called napi_hash_del() before netif_napi_del(), |
| 2207 | * we need to respect an RCU grace period before freeing vi->rq |
| 2208 | */ |
| 2209 | synchronize_net(); |
| 2210 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2211 | kfree(vi->rq); |
| 2212 | kfree(vi->sq); |
| 2213 | } |
| 2214 | |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 2215 | static void _free_receive_bufs(struct virtnet_info *vi) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2216 | { |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2217 | struct bpf_prog *old_prog; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2218 | int i; |
| 2219 | |
| 2220 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2221 | while (vi->rq[i].pages) |
| 2222 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2223 | |
| 2224 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2225 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); |
| 2226 | if (old_prog) |
| 2227 | bpf_prog_put(old_prog); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2228 | } |
John Fastabend | 47315329 | 2017-02-02 19:14:32 -0800 | [diff] [blame] | 2229 | } |
| 2230 | |
| 2231 | static void free_receive_bufs(struct virtnet_info *vi) |
| 2232 | { |
| 2233 | rtnl_lock(); |
| 2234 | _free_receive_bufs(vi); |
John Fastabend | f600b69 | 2016-12-15 12:13:24 -0800 | [diff] [blame] | 2235 | rtnl_unlock(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2236 | } |
| 2237 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 2238 | static void free_receive_page_frags(struct virtnet_info *vi) |
| 2239 | { |
| 2240 | int i;
| | 
| 2241 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2242 | if (vi->rq[i].alloc_frag.page) |
| 2243 | put_page(vi->rq[i].alloc_frag.page); |
| 2244 | } |
| 2245 | |
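| | /* TX queue layout with XDP loaded: queues [0, curr - xdp_queue_pairs)
| |  * carry skbs, while the remaining [curr - xdp_queue_pairs, curr) carry
| |  * raw XDP buffers that must be freed with put_page() rather than
| |  * dev_kfree_skb().
| |  */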
John Fastabend | b68df01 | 2017-01-25 18:22:48 -0800 | [diff] [blame] | 2246 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 2247 | { |
| 2248 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) |
| 2249 | return false; |
| 2250 | else if (q < vi->curr_queue_pairs) |
| 2251 | return true; |
| 2252 | else |
| 2253 | return false; |
| 2254 | } |
| 2255 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2256 | static void free_unused_bufs(struct virtnet_info *vi) |
| 2257 | { |
| 2258 | void *buf; |
| 2259 | int i; |
| 2260 | |
| 2261 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2262 | struct virtqueue *vq = vi->sq[i].vq; |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 2263 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
John Fastabend | b68df01 | 2017-01-25 18:22:48 -0800 | [diff] [blame] | 2264 | if (!is_xdp_raw_buffer_queue(vi, i)) |
John Fastabend | 56434a0 | 2016-12-15 12:14:13 -0800 | [diff] [blame] | 2265 | dev_kfree_skb(buf); |
| 2266 | else |
| 2267 | put_page(virt_to_head_page(buf)); |
| 2268 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2269 | } |
| 2270 | |
| 2271 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2272 | struct virtqueue *vq = vi->rq[i].vq; |
| 2273 | |
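| | /* RX buffers were posted as raw pages (mergeable and small modes) or
| | * as page chains (big-packets mode); free each accordingly.
| | */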
| 2274 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2275 | if (vi->mergeable_rx_bufs) { |
Michael S. Tsirkin | 680557c | 2017-03-06 21:29:47 +0200 | [diff] [blame] | 2276 | put_page(virt_to_head_page(buf)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2277 | } else if (vi->big_packets) { |
Andrey Vagin | fa9fac1 | 2013-12-05 18:36:20 +0400 | [diff] [blame] | 2278 | give_pages(&vi->rq[i], buf); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2279 | } else { |
Jason Wang | f6b1020 | 2017-02-21 16:46:28 +0800 | [diff] [blame] | 2280 | put_page(virt_to_head_page(buf)); |
Michael Dalton | ab7db91 | 2014-01-16 22:23:27 -0800 | [diff] [blame] | 2281 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2282 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2283 | } |
| 2284 | } |
| 2285 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2286 | static void virtnet_del_vqs(struct virtnet_info *vi) |
| 2287 | { |
| 2288 | struct virtio_device *vdev = vi->vdev; |
| 2289 | |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 2290 | virtnet_clean_affinity(vi, -1); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2291 | |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2292 | vdev->config->del_vqs(vdev); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2293 | |
| 2294 | virtnet_free_queues(vi); |
| 2295 | } |
| 2296 | |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2297 | /* How large should a single buffer be so a queue full of these can fit at |
| 2298 | * least one full packet? |
| 2299 | * Logic below assumes the mergeable buffer header is used. |
| 2300 | */ |
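| | /* For example, with a 16-entry ring and a 65535-byte max MTU the sum of
| |  * header and packet space is 65565 bytes, so each of the 16 buffers must
| |  * hold at least DIV_ROUND_UP(65565, 16) - 12 = 4086 bytes; smaller rings
| |  * or MTUs bottom out at GOOD_PACKET_LEN.
| |  */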
| 2301 | static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) |
| 2302 | { |
| 2303 | const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 2304 | unsigned int rq_size = virtqueue_get_vring_size(vq); |
| 2305 | unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; |
| 2306 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; |
| 2307 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); |
| 2308 | |
Michael S. Tsirkin | f0c3192 | 2017-06-02 17:54:33 +0300 | [diff] [blame] | 2309 | return max(max(min_buf_len, hdr_len) - hdr_len, |
| 2310 | (unsigned int)GOOD_PACKET_LEN); |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2311 | } |
| 2312 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2313 | static int virtnet_find_vqs(struct virtnet_info *vi) |
| 2314 | { |
| 2315 | vq_callback_t **callbacks; |
| 2316 | struct virtqueue **vqs; |
| 2317 | int ret = -ENOMEM; |
| 2318 | int i, total_vqs; |
| 2319 | const char **names; |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2320 | bool *ctx; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2321 | |
| 2322 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by |
| 2323 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by |
| 2324 | * possible control vq. |
| 2325 | */ |
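| | /* e.g. a device with 4 queue pairs and a control vq needs
| |  * 4 * 2 + 1 = 9 virtqueues.
| |  */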
| 2326 | total_vqs = vi->max_queue_pairs * 2 + |
| 2327 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
| 2328 | |
| 2329 | /* Allocate space for find_vqs parameters */ |
| 2330 | vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
| 2331 | if (!vqs) |
| 2332 | goto err_vq; |
| 2333 | callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
| 2334 | if (!callbacks) |
| 2335 | goto err_callback; |
| 2336 | names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
| 2337 | if (!names) |
| 2338 | goto err_names; |
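| | /* Except in big-packets mode, RX buffers carry a per-buffer context
| | * (buffer size/headroom information), so request context support on
| | * those virtqueues.
| | */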
Jason Wang | 192f68c | 2017-07-19 16:54:47 +0800 | [diff] [blame] | 2339 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2340 | ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
| 2341 | if (!ctx) |
| 2342 | goto err_ctx; |
| 2343 | } else { |
| 2344 | ctx = NULL; |
| 2345 | } |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2346 | |
| 2347 | /* Parameters for control virtqueue, if any */ |
| 2348 | if (vi->has_cvq) { |
| 2349 | callbacks[total_vqs - 1] = NULL; |
| 2350 | names[total_vqs - 1] = "control"; |
| 2351 | } |
| 2352 | |
| 2353 | /* Allocate/initialize parameters for send/receive virtqueues */ |
| 2354 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2355 | callbacks[rxq2vq(i)] = skb_recv_done; |
| 2356 | callbacks[txq2vq(i)] = skb_xmit_done; |
| 2357 | sprintf(vi->rq[i].name, "input.%d", i); |
| 2358 | sprintf(vi->sq[i].name, "output.%d", i); |
| 2359 | names[rxq2vq(i)] = vi->rq[i].name; |
| 2360 | names[txq2vq(i)] = vi->sq[i].name; |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2361 | if (ctx) |
| 2362 | ctx[rxq2vq(i)] = true; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2363 | } |
| 2364 | |
| 2365 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2366 | names, ctx, NULL); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2367 | if (ret) |
| 2368 | goto err_find; |
| 2369 | |
| 2370 | if (vi->has_cvq) { |
| 2371 | vi->cvq = vqs[total_vqs - 1]; |
| 2372 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
Patrick McHardy | f646968 | 2013-04-19 02:04:27 +0000 | [diff] [blame] | 2373 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2374 | } |
| 2375 | |
| 2376 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2377 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2378 | vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2379 | vi->sq[i].vq = vqs[txq2vq(i)]; |
| 2380 | } |
| 2381 | |
| 2382 | kfree(names); |
| 2383 | kfree(callbacks); |
| 2384 | kfree(vqs); |
Jason Wang | 5528162 | 2017-07-07 19:56:09 +0800 | [diff] [blame] | 2385 | kfree(ctx); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2386 | |
| 2387 | return 0; |
| 2388 | |
| 2389 | err_find: |
Michael S. Tsirkin | d45b897 | 2017-03-06 20:31:21 +0200 | [diff] [blame] | 2390 | kfree(ctx); |
| 2391 | err_ctx: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2392 | kfree(names); |
| 2393 | err_names: |
| 2394 | kfree(callbacks); |
| 2395 | err_callback: |
| 2396 | kfree(vqs); |
| 2397 | err_vq: |
| 2398 | return ret; |
| 2399 | } |
| 2400 | |
| 2401 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
| 2402 | { |
| 2403 | int i; |
| 2404 | |
| 2405 | vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
| 2406 | if (!vi->sq) |
| 2407 | goto err_sq; |
| 2408 | vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
Amerigo Wang | 008d427 | 2012-12-10 02:24:08 +0000 | [diff] [blame] | 2409 | if (!vi->rq) |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2410 | goto err_rq; |
| 2411 | |
| 2412 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
| 2413 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2414 | vi->rq[i].pages = NULL; |
| 2415 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
| 2416 | napi_weight); |
Willem de Bruijn | 1d11e73 | 2017-04-27 20:37:58 -0400 | [diff] [blame] | 2417 | netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, |
| 2418 | napi_tx ? napi_weight : 0); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2419 | |
| 2420 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 2421 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2422 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
| 2423 | } |
| 2424 | |
| 2425 | return 0; |
| 2426 | |
| 2427 | err_rq: |
| 2428 | kfree(vi->sq); |
| 2429 | err_sq: |
| 2430 | return -ENOMEM; |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2431 | } |
| 2432 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2433 | static int init_vqs(struct virtnet_info *vi) |
| 2434 | { |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2435 | int ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2436 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2437 | /* Allocate send & receive queues */ |
| 2438 | ret = virtnet_alloc_queues(vi); |
| 2439 | if (ret) |
| 2440 | goto err; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2441 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2442 | ret = virtnet_find_vqs(vi); |
| 2443 | if (ret) |
| 2444 | goto err_free; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2445 | |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2446 | get_online_cpus(); |
Wanlong Gao | 8898c21 | 2013-01-24 23:51:30 +0000 | [diff] [blame] | 2447 | virtnet_set_affinity(vi); |
Wanlong Gao | 47be247 | 2013-01-24 23:51:29 +0000 | [diff] [blame] | 2448 | put_online_cpus(); |
| 2449 | |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2450 | return 0; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2451 | |
| 2452 | err_free: |
| 2453 | virtnet_free_queues(vi); |
| 2454 | err: |
| 2455 | return ret; |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2456 | } |
| 2457 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2458 | #ifdef CONFIG_SYSFS |
| 2459 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, |
stephen hemminger | 718ad68 | 2017-08-18 13:46:24 -0700 | [diff] [blame] | 2460 | char *buf) |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2461 | { |
| 2462 | struct virtnet_info *vi = netdev_priv(queue->dev); |
| 2463 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
Johannes Berg | 5377d758 | 2015-08-19 09:48:40 +0200 | [diff] [blame] | 2464 | struct ewma_pkt_len *avg; |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2465 | |
| 2466 | BUG_ON(queue_index >= vi->max_queue_pairs); |
| 2467 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
Michael S. Tsirkin | d85b758f7 | 2017-03-09 02:21:21 +0200 | [diff] [blame] | 2468 | return sprintf(buf, "%u\n", |
| 2469 | get_mergeable_buf_len(&vi->rq[queue_index], avg)); |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2470 | } |
| 2471 | |
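| | /* Exposed as /sys/class/net/<dev>/queues/rx-<n>/virtio_net/
| |  * mergeable_rx_buffer_size, reporting the current EWMA-based estimate
| |  * of the buffer size being allocated for that receive queue.
| |  */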
| 2472 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
| 2473 | __ATTR_RO(mergeable_rx_buffer_size); |
| 2474 | |
| 2475 | static struct attribute *virtio_net_mrg_rx_attrs[] = { |
| 2476 | &mergeable_rx_buffer_size_attribute.attr, |
| 2477 | NULL |
| 2478 | }; |
| 2479 | |
| 2480 | static const struct attribute_group virtio_net_mrg_rx_group = { |
| 2481 | .name = "virtio_net", |
| 2482 | .attrs = virtio_net_mrg_rx_attrs |
| 2483 | }; |
| 2484 | #endif |
| 2485 | |
Jason Wang | 892d6eb | 2014-11-20 17:03:05 +0800 | [diff] [blame] | 2486 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
| 2487 | unsigned int fbit, |
| 2488 | const char *fname, const char *dname) |
| 2489 | { |
| 2490 | if (!virtio_has_feature(vdev, fbit)) |
| 2491 | return false; |
| 2492 | |
| 2493 | dev_err(&vdev->dev, "device advertises feature %s but not %s", |
| 2494 | fname, dname); |
| 2495 | |
| 2496 | return true; |
| 2497 | } |
| 2498 | |
| 2499 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ |
| 2500 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) |
| 2501 | |
| 2502 | static bool virtnet_validate_features(struct virtio_device *vdev) |
| 2503 | { |
| 2504 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && |
| 2505 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, |
| 2506 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2507 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, |
| 2508 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2509 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, |
| 2510 | "VIRTIO_NET_F_CTRL_VQ") || |
| 2511 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || |
| 2512 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, |
| 2513 | "VIRTIO_NET_F_CTRL_VQ"))) { |
| 2514 | return false; |
| 2515 | } |
| 2516 | |
| 2517 | return true; |
| 2518 | } |
| 2519 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 2520 | #define MIN_MTU ETH_MIN_MTU |
| 2521 | #define MAX_MTU ETH_MAX_MTU |
| 2522 | |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2523 | static int virtnet_validate(struct virtio_device *vdev) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2524 | { |
Michael S. Tsirkin | 6ba4224 | 2015-01-12 16:23:37 +0200 | [diff] [blame] | 2525 | if (!vdev->config->get) { |
| 2526 | dev_err(&vdev->dev, "%s failure: config access disabled\n", |
| 2527 | __func__); |
| 2528 | return -EINVAL; |
| 2529 | } |
| 2530 | |
Jason Wang | 892d6eb | 2014-11-20 17:03:05 +0800 | [diff] [blame] | 2531 | if (!virtnet_validate_features(vdev)) |
| 2532 | return -EINVAL; |
| 2533 | |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2534 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 2535 | int mtu = virtio_cread16(vdev, |
| 2536 | offsetof(struct virtio_net_config, |
| 2537 | mtu)); |
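| | /* An out-of-range MTU is not fatal: clearing the feature bit makes
| | * probe ignore the device's MTU and keep the default.
| | */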
| 2538 | if (mtu < MIN_MTU) |
| 2539 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); |
| 2540 | } |
| 2541 | |
| 2542 | return 0; |
| 2543 | } |
| 2544 | |
| 2545 | static int virtnet_probe(struct virtio_device *vdev) |
| 2546 | { |
| 2547 | int i, err; |
| 2548 | struct net_device *dev; |
| 2549 | struct virtnet_info *vi; |
| 2550 | u16 max_queue_pairs; |
| 2551 | int mtu; |
| 2552 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2553 | /* Find if host supports multiqueue virtio_net device */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2554 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
| 2555 | struct virtio_net_config, |
| 2556 | max_virtqueue_pairs, &max_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2557 | |
| 2558 | /* We need at least 2 queue's */ |
| 2559 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
| 2560 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
| 2561 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 2562 | max_queue_pairs = 1; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2563 | |
| 2564 | /* Allocate ourselves a network device with room for our info */ |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2565 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2566 | if (!dev) |
| 2567 | return -ENOMEM; |
| 2568 | |
| 2569 | /* Set up network device as normal. */ |
Jiri Pirko | f2f2c8b | 2012-06-29 05:10:06 +0000 | [diff] [blame] | 2570 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
Stephen Hemminger | 76288b4 | 2009-01-06 10:44:22 -0800 | [diff] [blame] | 2571 | dev->netdev_ops = &virtnet_netdev; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2572 | dev->features = NETIF_F_HIGHDMA; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2573 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 2574 | dev->ethtool_ops = &virtnet_ethtool_ops; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2575 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 2576 | |
| 2577 | /* Do we support "hardware" checksums? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2578 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2579 | /* This opens up the world of extra features. */ |
Jason Wang | 48900cb | 2015-08-05 10:34:04 +0800 | [diff] [blame] | 2580 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2581 | if (csum) |
Jason Wang | 48900cb | 2015-08-05 10:34:04 +0800 | [diff] [blame] | 2582 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2583 | |
| 2584 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
David S. Miller | e078de0 | 2017-07-03 06:37:32 -0700 | [diff] [blame] | 2585 | dev->hw_features |= NETIF_F_TSO |
Rusty Russell | 34a4857 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2586 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 2587 | } |
Rusty Russell | 5539ae96 | 2008-05-02 21:50:46 -0500 | [diff] [blame] | 2588 | /* Individual feature bits: what can host handle? */ |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2589 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 2590 | dev->hw_features |= NETIF_F_TSO; |
| 2591 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 2592 | dev->hw_features |= NETIF_F_TSO6; |
| 2593 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 2594 | dev->hw_features |= NETIF_F_TSO_ECN; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2595 | |
Jason Wang | 41f2f12 | 2014-12-24 11:03:52 +0800 | [diff] [blame] | 2596 | dev->features |= NETIF_F_GSO_ROBUST; |
| 2597 | |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2598 | if (gso) |
David S. Miller | e078de0 | 2017-07-03 06:37:32 -0700 | [diff] [blame] | 2599 | dev->features |= dev->hw_features & NETIF_F_ALL_TSO; |
Michał Mirosław | 98e778c | 2011-03-31 01:01:35 +0000 | [diff] [blame] | 2600 | /* (!csum && gso) case will be fixed by register_netdev() */ |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2601 | } |
Thomas Huth | 4f49129 | 2013-08-27 17:09:02 +0200 | [diff] [blame] | 2602 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 2603 | dev->features |= NETIF_F_RXCSUM; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2604 | |
Jason Wang | 4fda830 | 2013-04-10 23:32:21 +0000 | [diff] [blame] | 2605 | dev->vlan_features = dev->features; |
| 2606 | |
Jarod Wilson | d0c2c99 | 2016-10-20 13:55:21 -0400 | [diff] [blame] | 2607 | /* MTU range: 68 - 65535 */ |
| 2608 | dev->min_mtu = MIN_MTU; |
| 2609 | dev->max_mtu = MAX_MTU; |
| 2610 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2611 | /* Configuration may specify what MAC to use. Otherwise random. */ |
Rusty Russell | 855e0c5 | 2013-10-14 18:11:51 +1030 | [diff] [blame] | 2612 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
| 2613 | virtio_cread_bytes(vdev, |
| 2614 | offsetof(struct virtio_net_config, mac), |
| 2615 | dev->dev_addr, dev->addr_len); |
| 2616 | else |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 2617 | eth_hw_addr_random(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2618 | |
| 2619 | /* Set up our device-specific information */ |
| 2620 | vi = netdev_priv(dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2621 | vi->dev = dev; |
| 2622 | vi->vdev = vdev; |
Christian Borntraeger | d9d5dcc | 2008-02-18 10:02:51 +0100 | [diff] [blame] | 2623 | vdev->priv = vi; |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2624 | vi->stats = alloc_percpu(struct virtnet_stats); |
| 2625 | err = -ENOMEM; |
| 2626 | if (vi->stats == NULL) |
| 2627 | goto free; |
| 2628 | |
John Stultz | 827da44 | 2013-10-07 15:51:58 -0700 | [diff] [blame] | 2629 | for_each_possible_cpu(i) { |
| 2630 | struct virtnet_stats *virtnet_stats; |
| 2631 | virtnet_stats = per_cpu_ptr(vi->stats, i); |
| 2632 | u64_stats_init(&virtnet_stats->tx_syncp); |
| 2633 | u64_stats_init(&virtnet_stats->rx_syncp); |
| 2634 | } |
| 2635 | |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2636 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2637 | |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 2638 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 2639 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 2640 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
Vlad Yasevich | e3e3c42 | 2015-02-03 16:36:17 -0500 | [diff] [blame] | 2641 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 2642 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) |
Herbert Xu | 97402b9 | 2008-04-18 11:24:27 +0800 | [diff] [blame] | 2643 | vi->big_packets = true; |
| 2644 | |
Mark McLoughlin | 3f2c31d | 2008-11-16 22:41:34 -0800 | [diff] [blame] | 2645 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 2646 | vi->mergeable_rx_bufs = true; |
| 2647 | |
Michael S. Tsirkin | d04302b | 2014-10-24 00:24:03 +0300 | [diff] [blame] | 2648 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || |
| 2649 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 2650 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 2651 | else |
| 2652 | vi->hdr_len = sizeof(struct virtio_net_hdr); |
| 2653 | |
Michael S. Tsirkin | 7599330 | 2015-07-15 15:26:19 +0300 | [diff] [blame] | 2654 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
| 2655 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 2656 | vi->any_header_sg = true; |
| 2657 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2658 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 2659 | vi->has_cvq = true; |
| 2660 | |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 2661 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 2662 | mtu = virtio_cread16(vdev, |
| 2663 | offsetof(struct virtio_net_config, |
| 2664 | mtu)); |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 2665 | if (mtu < dev->min_mtu) { |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2666 | /* Should never trigger: MTU was previously validated |
| 2667 | * in virtnet_validate. |
| 2668 | */ |
| 2669 | dev_err(&vdev->dev, "device MTU appears to have changed, "
| 2670 | "it is now %d < %d", mtu, dev->min_mtu); |
| 2671 | goto free_stats; |
Aaron Conole | 93a205e | 2016-10-25 16:12:12 -0400 | [diff] [blame] | 2672 | } |
Michael S. Tsirkin | 2e123b4 | 2017-03-08 02:14:25 +0200 | [diff] [blame] | 2673 | |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2674 | dev->mtu = mtu; |
| 2675 | dev->max_mtu = mtu; |
| 2676 | |
Michael S. Tsirkin | 2e123b4 | 2017-03-08 02:14:25 +0200 | [diff] [blame] | 2677 | /* TODO: size buffers correctly in this case. */ |
| 2678 | if (dev->mtu > ETH_DATA_LEN) |
| 2679 | vi->big_packets = true; |
Aaron Conole | 14de9d1 | 2016-06-03 16:57:12 -0400 | [diff] [blame] | 2680 | } |
| 2681 | |
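| | /* When any header/data layout is allowed, the virtio header can be |
| | * prepended to the skb itself, so reserve headroom for it. |
| | */ |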
Michael S. Tsirkin | 012873d | 2014-10-24 16:55:57 +0300 | [diff] [blame] | 2682 | if (vi->any_header_sg) |
| 2683 | dev->needed_headroom = vi->hdr_len; |
Zhangjie (HZ) | 6ebbc1a | 2014-04-29 18:43:22 +0800 | [diff] [blame] | 2684 | 
Jason Wang | 4490001 | 2016-11-25 12:37:26 +0800 | [diff] [blame] | 2685 | /* Enable multiqueue by default */ |
| 2686 | if (num_online_cpus() >= max_queue_pairs) |
| 2687 | vi->curr_queue_pairs = max_queue_pairs; |
| 2688 | else |
| 2689 | vi->curr_queue_pairs = num_online_cpus(); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2690 | vi->max_queue_pairs = max_queue_pairs; |
| 2691 | |
| 2692 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ |
Amit Shah | 3f9c10b | 2011-12-22 16:58:31 +0530 | [diff] [blame] | 2693 | err = init_vqs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2694 | if (err) |
Jason Wang | 9bb8ca8 | 2013-11-05 18:19:45 +0800 | [diff] [blame] | 2695 | goto free_stats; |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2696 | |
Michael Dalton | fbf28d7 | 2014-01-16 22:23:30 -0800 | [diff] [blame] | 2697 | #ifdef CONFIG_SYSFS |
| 2698 | if (vi->mergeable_rx_bufs) |
| 2699 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; |
| 2700 | #endif |
Zhi Yong Wu | 0f13b66 | 2013-11-18 21:19:27 +0800 | [diff] [blame] | 2701 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
| 2702 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2703 | |
Nikolay Aleksandrov | 16032be | 2016-02-03 04:04:37 +0100 | [diff] [blame] | 2704 | virtnet_init_settings(dev); |
| 2705 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2706 | err = register_netdev(dev); |
| 2707 | if (err) { |
| 2708 | pr_debug("virtio_net: registering device failed\n"); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2709 | goto free_vqs; |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2710 | } |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2711 | |
Michael S. Tsirkin | 4baf1e3 | 2014-10-15 10:22:30 +1030 | [diff] [blame] | 2712 | virtio_device_ready(vdev); |
| 2713 | |
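| | /* DRIVER_OK is now set: from this point on the device may raise |
| | * config and virtqueue interrupts. |
| | */ |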
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2714 | err = virtnet_cpu_notif_add(vi); |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 2715 | if (err) { |
| 2716 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
wangyunjian | f00e35e | 2016-05-31 11:52:43 +0800 | [diff] [blame] | 2717 | goto free_unregister_netdev; |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 2718 | } |
| 2719 | |
Jason Wang | a220871 | 2016-12-13 14:23:05 +0800 | [diff] [blame] | 2720 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
Jason Wang | 4490001 | 2016-11-25 12:37:26 +0800 | [diff] [blame] | 2721 | |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 2722 | /* Assume link up if the device can't report link status; |
| 2723 | * otherwise get link status from config. */ |
| 2724 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
| 2725 | netif_carrier_off(dev); |
Tejun Heo | 3b07e9c | 2012-08-20 14:51:24 -0700 | [diff] [blame] | 2726 | schedule_work(&vi->config_work); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 2727 | } else { |
| 2728 | vi->status = VIRTIO_NET_S_LINK_UP; |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 2729 | virtnet_update_settings(vi); |
Jason Wang | 167c25e | 2010-11-10 14:45:41 +0000 | [diff] [blame] | 2730 | netif_carrier_on(dev); |
| 2731 | } |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2732 | |
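| | /* Record which offloads the device accepted so they can be |
| | * restored after the XDP paths toggle them off. |
| | */ |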
Jason Wang | 3f93522 | 2017-07-19 16:54:49 +0800 | [diff] [blame] | 2733 | for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) |
| 2734 | if (virtio_has_feature(vi->vdev, guest_offloads[i])) |
| 2735 | set_bit(guest_offloads[i], &vi->guest_offloads); |
| 2736 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2737 | pr_debug("virtnet: registered device %s with %d RX and TX vqs\n", |
| 2738 | dev->name, max_queue_pairs); |
| 2739 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2740 | return 0; |
| 2741 | |
wangyunjian | f00e35e | 2016-05-31 11:52:43 +0800 | [diff] [blame] | 2742 | free_unregister_netdev: |
Michael S. Tsirkin | 0246555 | 2014-10-15 10:22:31 +1030 | [diff] [blame] | 2743 | vi->vdev->config->reset(vdev); |
| 2744 | |
Rusty Russell | b3369c1 | 2008-02-04 23:50:02 -0500 | [diff] [blame] | 2745 | unregister_netdev(dev); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2746 | free_vqs: |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2747 | cancel_delayed_work_sync(&vi->refill); |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 2748 | free_receive_page_frags(vi); |
Jason Wang | e9d7417 | 2012-12-07 07:04:55 +0000 | [diff] [blame] | 2749 | virtnet_del_vqs(vi); |
stephen hemminger | 3fa2a1d | 2011-06-15 06:36:29 +0000 | [diff] [blame] | 2750 | free_stats: |
| 2751 | free_percpu(vi->stats); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2752 | free: |
| 2753 | free_netdev(dev); |
| 2754 | return err; |
| 2755 | } |
| 2756 | |
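| | /* Teardown shared by remove and freeze: reset the device so it |
| | * stops touching buffers, then reclaim everything still queued. |
| | */ |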
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2757 | static void remove_vq_common(struct virtnet_info *vi) |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2758 | { |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2759 | vi->vdev->config->reset(vi->vdev); |
Shirley Ma | 830a8a9 | 2010-02-08 14:14:42 +0000 | [diff] [blame] | 2760 | |
| 2761 | /* Free unused buffers in both send and recv, if any. */ |
Shirley Ma | 9ab86bb | 2010-01-29 03:20:04 +0000 | [diff] [blame] | 2762 | free_unused_bufs(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 2763 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2764 | free_receive_bufs(vi); |
Michael S. Tsirkin | d2a7ddd | 2009-06-12 22:16:36 -0600 | [diff] [blame] | 2765 | |
Michael Dalton | fb51879 | 2014-01-16 22:23:26 -0800 | [diff] [blame] | 2766 | free_receive_page_frags(vi); |
| 2767 | |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2768 | virtnet_del_vqs(vi); |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2769 | } |
| 2770 | |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 2771 | static void virtnet_remove(struct virtio_device *vdev) |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2772 | { |
| 2773 | struct virtnet_info *vi = vdev->priv; |
| 2774 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2775 | virtnet_cpu_notif_remove(vi); |
Wanlong Gao | 8de4b2f | 2013-01-24 23:51:31 +0000 | [diff] [blame] | 2776 | |
Michael S. Tsirkin | 102a278 | 2014-10-15 10:22:29 +1030 | [diff] [blame] | 2777 | /* Make sure no work handler is accessing the device. */ |
| 2778 | flush_work(&vi->config_work); |
Jason Wang | 586d17c | 2012-04-11 20:43:52 +0000 | [diff] [blame] | 2779 | |
Amit Shah | 04486ed | 2011-12-22 16:58:32 +0530 | [diff] [blame] | 2780 | unregister_netdev(vi->dev); |
| 2781 | |
| 2782 | remove_vq_common(vi); |
Rusty Russell | fb6813f | 2008-07-25 12:06:01 -0500 | [diff] [blame] | 2783 | |
Krishna Kumar | 2e66f55 | 2011-07-20 03:56:02 +0000 | [diff] [blame] | 2784 | free_percpu(vi->stats); |
Rusty Russell | 74b2553 | 2007-11-19 11:20:42 -0500 | [diff] [blame] | 2785 | free_netdev(vi->dev); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2786 | } |
| 2787 | |
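| | /* The PM callbacks are only referenced under CONFIG_PM_SLEEP; |
| | * __maybe_unused avoids unused-function warnings elsewhere. |
| | */ |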
Arnd Bergmann | 67a7519 | 2017-07-25 17:35:50 +0200 | [diff] [blame] | 2788 | static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2789 | { |
| 2790 | struct virtnet_info *vi = vdev->priv; |
| 2791 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2792 | virtnet_cpu_notif_remove(vi); |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2793 | virtnet_freeze_down(vdev); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2794 | remove_vq_common(vi); |
| 2795 | |
| 2796 | return 0; |
| 2797 | } |
| 2798 | |
Arnd Bergmann | 67a7519 | 2017-07-25 17:35:50 +0200 | [diff] [blame] | 2799 | static __maybe_unused int virtnet_restore(struct virtio_device *vdev) |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2800 | { |
| 2801 | struct virtnet_info *vi = vdev->priv; |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2802 | int err; |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2803 | |
John Fastabend | 9fe7bfc | 2017-02-02 19:16:01 -0800 | [diff] [blame] | 2804 | err = virtnet_restore_up(vdev); |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2805 | if (err) |
| 2806 | return err; |
Jason Wang | 986a4f4 | 2012-12-07 07:04:56 +0000 | [diff] [blame] | 2807 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
| 2808 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2809 | err = virtnet_cpu_notif_add(vi); |
Jason Wang | ec9debb | 2013-10-29 15:11:07 +0800 | [diff] [blame] | 2810 | if (err) |
| 2811 | return err; |
| 2812 | |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2813 | return 0; |
| 2814 | } |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2815 | |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2816 | static struct virtio_device_id id_table[] = { |
| 2817 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 2818 | { 0 }, |
| 2819 | }; |
| 2820 | |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2821 | #define VIRTNET_FEATURES \ |
| 2822 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ |
| 2823 | VIRTIO_NET_F_MAC, \ |
| 2824 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ |
| 2825 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ |
| 2826 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ |
| 2827 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ |
| 2828 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ |
| 2829 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ |
| 2830 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ |
Jason Baron | faa9b39 | 2018-01-05 17:44:54 -0500 | [diff] [blame] | 2831 | VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ |
| 2832 | VIRTIO_NET_F_SPEED_DUPLEX |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2833 | |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 2834 | static unsigned int features[] = { |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2835 | VIRTNET_FEATURES, |
| 2836 | }; |
| 2837 | |
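| | /* Legacy (pre-1.0) devices may additionally offer the deprecated |
| | * GSO feature and ANY_LAYOUT, which VERSION_1 makes implicit. |
| | */ |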
| 2838 | static unsigned int features_legacy[] = { |
| 2839 | VIRTNET_FEATURES, |
| 2840 | VIRTIO_NET_F_GSO, |
Michael S. Tsirkin | e7428e9 | 2013-07-25 10:20:23 +0930 | [diff] [blame] | 2841 | VIRTIO_F_ANY_LAYOUT, |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 2842 | }; |
| 2843 | |
Uwe Kleine-König | 2240252 | 2009-11-05 01:32:44 -0800 | [diff] [blame] | 2844 | static struct virtio_driver virtio_net_driver = { |
Rusty Russell | c45a681 | 2008-05-02 21:50:50 -0500 | [diff] [blame] | 2845 | .feature_table = features, |
| 2846 | .feature_table_size = ARRAY_SIZE(features), |
Michael S. Tsirkin | f335850 | 2016-11-04 12:55:36 +0200 | [diff] [blame] | 2847 | .feature_table_legacy = features_legacy, |
| 2848 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2849 | .driver.name = KBUILD_MODNAME, |
| 2850 | .driver.owner = THIS_MODULE, |
| 2851 | .id_table = id_table, |
Michael S. Tsirkin | fe36cbe | 2017-03-29 19:09:14 +0300 | [diff] [blame] | 2852 | .validate = virtnet_validate, |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2853 | .probe = virtnet_probe, |
Bill Pemberton | 8cc085d | 2012-12-03 09:24:15 -0500 | [diff] [blame] | 2854 | .remove = virtnet_remove, |
Mark McLoughlin | 9f4d26d | 2009-01-19 17:09:49 -0800 | [diff] [blame] | 2855 | .config_changed = virtnet_config_changed, |
Aaron Lu | 8910700 | 2013-09-17 09:25:23 +0930 | [diff] [blame] | 2856 | #ifdef CONFIG_PM_SLEEP |
Amit Shah | 0741bcb | 2011-12-22 16:58:33 +0530 | [diff] [blame] | 2857 | .freeze = virtnet_freeze, |
| 2858 | .restore = virtnet_restore, |
| 2859 | #endif |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2860 | }; |
| 2861 | |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2862 | static __init int virtio_net_driver_init(void) |
| 2863 | { |
| 2864 | int ret; |
| 2865 | |
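| | /* Register two CPU hotplug states: a dynamically allocated ONLINE |
| | * state (its id is returned in ret) and the fixed DEAD-phase state |
| | * used for cleanup after a CPU is removed. |
| | */ |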
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 2866 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2867 | virtnet_cpu_online, |
| 2868 | virtnet_cpu_down_prep); |
| 2869 | if (ret < 0) |
| 2870 | goto out; |
| 2871 | virtionet_online = ret; |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 2872 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2873 | NULL, virtnet_cpu_dead); |
| 2874 | if (ret) |
| 2875 | goto err_dead; |
| 2876 | |
| 2877 | ret = register_virtio_driver(&virtio_net_driver); |
| 2878 | if (ret) |
| 2879 | goto err_virtio; |
| 2880 | return 0; |
| 2881 | err_virtio: |
| 2882 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 2883 | err_dead: |
| 2884 | cpuhp_remove_multi_state(virtionet_online); |
| 2885 | out: |
| 2886 | return ret; |
| 2887 | } |
| 2888 | module_init(virtio_net_driver_init); |
| 2889 | |
| 2890 | static __exit void virtio_net_driver_exit(void) |
| 2891 | { |
Andrew Jones | cfa0ebc | 2017-07-24 15:38:32 +0200 | [diff] [blame] | 2892 | unregister_virtio_driver(&virtio_net_driver); |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2893 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 2894 | cpuhp_remove_multi_state(virtionet_online); |
Sebastian Andrzej Siewior | 8017c27 | 2016-08-12 19:49:43 +0200 | [diff] [blame] | 2895 | } |
| 2896 | module_exit(virtio_net_driver_exit); |
Rusty Russell | 296f96f | 2007-10-22 11:03:37 +1000 | [diff] [blame] | 2897 | |
| 2898 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 2899 | MODULE_DESCRIPTION("Virtio network driver"); |
| 2900 | MODULE_LICENSE("GPL"); |