/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */
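
/* Illustrative worst-case arithmetic (not part of the original source):
 * each retry in netvsc_wait_until_empty() sleeps
 * usleep_range(RETRY_US_LO, RETRY_US_HI), i.e. 5-10 ms, so RETRY_MAX = 2000
 * retries bounds the wait to roughly 10-20 seconds before giving up with
 * -ETIMEDOUT.
 */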

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}
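
/* Usage sketch (illustrative): the synthetic device mirrors flag changes to
 * the VF, so e.g. "ip link set eth0 promisc on" bumps the slave VF's
 * promiscuity refcount by one and turning it off drops it again, keeping
 * the two devices' receive filters in step.
 */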

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netif_tx_wake_all_queues(net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, the slave will still be
		 * offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

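/* Illustrative layout (not part of the original source): after two
 * init_ppi_data() calls the RNDIS buffer looks like
 *
 *   [rndis_packet hdr][PPI 1 hdr][PPI 1 data][PPI 2 hdr][PPI 2 data][payload]
 *
 * Each call bumps data_offset and per_pkt_info_len by ppi_size and returns
 * a pointer just past the PPI header (ppi + 1), where the caller writes the
 * per-packet data, e.g. the 32-bit hash value for NBL_HASH_VALUE.
 */
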
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

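/* Worked example (illustrative): with UDP4 hashing disabled in ndc->l4_hash,
 * a UDP/IPv4 packet falls through to the L3 branch and hashes only the
 * source/destination address pair, jhash2() over 2 u32 words; an IPv6 flow
 * hashes the 8 u32 words of both addresses. Ports are deliberately left out
 * so all fragments of one flow land on the same queue.
 */
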
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       sb_dev, fallback);
		else
			txq = fallback(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	/* Fold an out-of-range index (e.g. one picked for a VF with more
	 * queues) back into the synthetic device's queue range.
	 */
	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
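
/* Worked example (illustrative, 4 KiB pages): a buffer at offset 5000 with
 * len 6000 first normalizes to page + 1, offset 904. Slot 0 then covers the
 * 3192 bytes to the page end, slot 1 the remaining 2808 bytes at offset 0 of
 * the next page, and fill_pg_buf() returns j + 1 = 2 slots used.
 */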

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip the unused bytes at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
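
/* Worked example (illustrative, 4 KiB pages): linear data starting at page
 * offset 100 with headlen 6000 needs DIV_ROUND_UP(6100, 4096) = 2 slots; a
 * single 3000-byte fragment at in-page offset 2000 adds PFN_UP(5000) = 2
 * more, so netvsc_get_slots() reports 4.
 */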

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up then redirect packets to it.
	 * Already called with rcu_read_lock_bh.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will need at most two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/* Place the rndis header in the skb head room; skb->cb is
	 * used for the hv_netvsc_packet structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

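/* Return-value sketch (illustrative): -EAGAIN maps to NETDEV_TX_BUSY, which
 * makes the core requeue the skb and retry later; -ENOSPC and the error
 * labels instead count the failure, free the skb and report NETDEV_TX_OK so
 * the stack does not retry a packet that can never fit.
 */
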
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

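/* Worked example (illustrative, assuming the host reports link speed in the
 * NDIS unit of 100 bps): a 10 Gbps link arrives as 100,000,000 units, and
 * dividing by 10000 stores 10,000 in ndev_ctx->speed, i.e. Mbps as ethtool
 * expects.
 */
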
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't keep trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netif_tx_disable(ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels setup */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

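/* Reconfiguration sketch (illustrative): callers that need to change ring or
 * channel parameters bracket the change with this pair, i.e.
 *
 *	ret = netvsc_detach(net, nvdev);
 *	...adjust the struct netvsc_device_info fields...
 *	ret = netvsc_attach(net, &device_info);
 *
 * as netvsc_set_channels() below does, falling back to the original
 * settings if the re-attach fails.
 */
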
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		return ret;

	ret = netvsc_attach(net, &device_info);
	if (ret) {
		device_info.num_chn = orig;
		if (netvsc_attach(net, &device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

	return ret;
}

Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 984 | static bool |
| 985 | netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd) |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 986 | { |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 987 | struct ethtool_link_ksettings diff1 = *cmd; |
| 988 | struct ethtool_link_ksettings diff2 = {}; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 989 | |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 990 | diff1.base.speed = 0; |
| 991 | diff1.base.duplex = 0; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 992 | /* advertising and cmd are usually set */ |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 993 | ethtool_link_ksettings_zero_link_mode(&diff1, advertising); |
| 994 | diff1.base.cmd = 0; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 995 | /* We set port to PORT_OTHER */ |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 996 | diff2.base.port = PORT_OTHER; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 997 | |
| 998 | return !memcmp(&diff1, &diff2, sizeof(diff1)); |
| 999 | } |
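| | 
| | /* The comparison above works by zeroing, in diff1, every field a caller may
| |  * legitimately set, and building diff2 as all-zero except port = PORT_OTHER;
| |  * any remaining difference means an unsupported field was modified.
| |  */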
| 1000 | |
| 1001 | static void netvsc_init_settings(struct net_device *dev) |
| 1002 | { |
| 1003 | struct net_device_context *ndc = netdev_priv(dev); |
| 1004 | |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1005 | ndc->l4_hash = HV_DEFAULT_L4HASH; |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1006 | |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1007 | ndc->speed = SPEED_UNKNOWN; |
Simon Xiao | f3c9d40e | 2017-04-14 14:42:58 -0700 | [diff] [blame] | 1008 | ndc->duplex = DUPLEX_FULL; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1009 | } |
| 1010 | |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1011 | static int netvsc_get_link_ksettings(struct net_device *dev, |
| 1012 | struct ethtool_link_ksettings *cmd) |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1013 | { |
| 1014 | struct net_device_context *ndc = netdev_priv(dev); |
| 1015 | |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1016 | cmd->base.speed = ndc->speed; |
| 1017 | cmd->base.duplex = ndc->duplex; |
| 1018 | cmd->base.port = PORT_OTHER; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1019 | |
| 1020 | return 0; |
| 1021 | } |
| 1022 | |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1023 | static int netvsc_set_link_ksettings(struct net_device *dev, |
| 1024 | const struct ethtool_link_ksettings *cmd) |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1025 | { |
| 1026 | struct net_device_context *ndc = netdev_priv(dev); |
| 1027 | u32 speed; |
| 1028 | |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1029 | speed = cmd->base.speed; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1030 | if (!ethtool_validate_speed(speed) || |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1031 | !ethtool_validate_duplex(cmd->base.duplex) || |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1032 | !netvsc_validate_ethtool_ss_cmd(cmd)) |
| 1033 | return -EINVAL; |
| 1034 | |
| 1035 | ndc->speed = speed; |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1036 | ndc->duplex = cmd->base.duplex; |
sixiao@microsoft.com | 49eb938 | 2016-02-25 15:24:08 -0800 | [diff] [blame] | 1037 | |
| 1038 | return 0; |
| 1039 | } |
| 1040 | |
Haiyang Zhang | 4d447c9 | 2011-12-15 13:45:17 -0800 | [diff] [blame] | 1041 | static int netvsc_change_mtu(struct net_device *ndev, int mtu) |
| 1042 | { |
| 1043 | struct net_device_context *ndevctx = netdev_priv(ndev); |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1044 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1045 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1046 | int orig_mtu = ndev->mtu; |
Haiyang Zhang | 4d447c9 | 2011-12-15 13:45:17 -0800 | [diff] [blame] | 1047 | struct netvsc_device_info device_info; |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1048 | int ret = 0; |
Haiyang Zhang | 4d447c9 | 2011-12-15 13:45:17 -0800 | [diff] [blame] | 1049 | |
stephen hemminger | a0be450 | 2017-03-22 14:51:01 -0700 | [diff] [blame] | 1050 | if (!nvdev || nvdev->destroy) |
Haiyang Zhang | 4d447c9 | 2011-12-15 13:45:17 -0800 | [diff] [blame] | 1051 | return -ENODEV; |
| 1052 | |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1053 | /* Change MTU of underlying VF netdev first. */ |
| 1054 | if (vf_netdev) { |
| 1055 | ret = dev_set_mtu(vf_netdev, mtu); |
| 1056 | if (ret) |
| 1057 | return ret; |
| 1058 | } |
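| | 
| | 	/* Changing the VF first means a failure here leaves the synthetic
| | 	 * device untouched; any later failure rolls the VF back under
| | 	 * rollback_vf below.
| | 	 */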
| 1059 | |
Andrew Schwartzmeyer | 8ebdcc5 | 2015-08-11 17:14:31 -0700 | [diff] [blame] | 1060 | memset(&device_info, 0, sizeof(device_info)); |
stephen hemminger | 2b01888 | 2017-01-24 13:06:03 -0800 | [diff] [blame] | 1061 | device_info.num_chn = nvdev->num_chn; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1062 | device_info.send_sections = nvdev->send_section_cnt; |
Alex Ng | 0ab09be | 2017-09-20 11:17:35 -0700 | [diff] [blame] | 1063 | device_info.send_section_size = nvdev->send_section_size; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1064 | device_info.recv_sections = nvdev->recv_section_cnt; |
Alex Ng | 0ab09be | 2017-09-20 11:17:35 -0700 | [diff] [blame] | 1065 | device_info.recv_section_size = nvdev->recv_section_size; |
Dexuan Cui | 152669b | 2017-03-02 13:00:53 +0000 | [diff] [blame] | 1066 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1067 | ret = netvsc_detach(ndev, nvdev); |
| 1068 | if (ret) |
| 1069 | goto rollback_vf; |
Dexuan Cui | 152669b | 2017-03-02 13:00:53 +0000 | [diff] [blame] | 1070 | |
Dexuan Cui | 152669b | 2017-03-02 13:00:53 +0000 | [diff] [blame] | 1071 | ndev->mtu = mtu; |
| 1072 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1073 | ret = netvsc_attach(ndev, &device_info); |
| 1074 | if (ret) |
| 1075 | goto rollback; |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1076 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1077 | return 0; |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1078 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1079 | rollback: |
| 1080 | /* Attempt rollback to original MTU */ |
| 1081 | ndev->mtu = orig_mtu; |
stephen hemminger | 68d715f | 2017-08-09 17:46:06 -0700 | [diff] [blame] | 1082 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1083 | if (netvsc_attach(ndev, &device_info)) |
| 1084 | netdev_err(ndev, "restoring mtu failed\n"); |
| 1085 | rollback_vf: |
| 1086 | if (vf_netdev) |
| 1087 | dev_set_mtu(vf_netdev, orig_mtu); |
Vitaly Kuznetsov | 1bdcec8 | 2016-05-13 13:55:21 +0200 | [diff] [blame] | 1088 | |
stephen hemminger | 9749fed | 2017-07-19 11:53:16 -0700 | [diff] [blame] | 1089 | return ret; |
Haiyang Zhang | 4d447c9 | 2011-12-15 13:45:17 -0800 | [diff] [blame] | 1090 | } |
| 1091 | |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1092 | static void netvsc_get_vf_stats(struct net_device *net, |
| 1093 | struct netvsc_vf_pcpu_stats *tot) |
| 1094 | { |
| 1095 | struct net_device_context *ndev_ctx = netdev_priv(net); |
| 1096 | int i; |
| 1097 | |
| 1098 | memset(tot, 0, sizeof(*tot)); |
| 1099 | |
| 1100 | for_each_possible_cpu(i) { |
| 1101 | const struct netvsc_vf_pcpu_stats *stats |
| 1102 | = per_cpu_ptr(ndev_ctx->vf_stats, i); |
| 1103 | u64 rx_packets, rx_bytes, tx_packets, tx_bytes; |
| 1104 | unsigned int start; |
| 1105 | |
| 1106 | do { |
| 1107 | start = u64_stats_fetch_begin_irq(&stats->syncp); |
| 1108 | rx_packets = stats->rx_packets; |
| 1109 | tx_packets = stats->tx_packets; |
| 1110 | rx_bytes = stats->rx_bytes; |
| 1111 | tx_bytes = stats->tx_bytes; |
| 1112 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); |
| 1113 | |
| 1114 | tot->rx_packets += rx_packets; |
| 1115 | tot->tx_packets += tx_packets; |
| 1116 | tot->rx_bytes += rx_bytes; |
| 1117 | tot->tx_bytes += tx_bytes; |
| 1118 | tot->tx_dropped += stats->tx_dropped; |
| 1119 | } |
| 1120 | } |
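| | 
| | /* The loop above is the standard u64_stats seqcount read pattern. A generic
| |  * reader-side sketch (assuming a stats struct with a u64_stats_sync member
| |  * named "syncp" and a u64 counter; the names are illustrative):
| |  *
| |  *	unsigned int start;
| |  *	u64 snap;
| |  *
| |  *	do {
| |  *		start = u64_stats_fetch_begin_irq(&stats->syncp);
| |  *		snap = stats->some_counter;
| |  *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
| |  */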
| 1121 | |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1122 | static void netvsc_get_pcpu_stats(struct net_device *net, |
| 1123 | struct netvsc_ethtool_pcpu_stats *pcpu_tot) |
| 1124 | { |
| 1125 | struct net_device_context *ndev_ctx = netdev_priv(net); |
| 1126 | struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); |
| 1127 | int i; |
| 1128 | |
| 1129 | 	/* fetch per-CPU stats of the VF */
| 1130 | for_each_possible_cpu(i) { |
| 1131 | const struct netvsc_vf_pcpu_stats *stats = |
| 1132 | per_cpu_ptr(ndev_ctx->vf_stats, i); |
| 1133 | struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i]; |
| 1134 | unsigned int start; |
| 1135 | |
| 1136 | do { |
| 1137 | start = u64_stats_fetch_begin_irq(&stats->syncp); |
| 1138 | this_tot->vf_rx_packets = stats->rx_packets; |
| 1139 | this_tot->vf_tx_packets = stats->tx_packets; |
| 1140 | this_tot->vf_rx_bytes = stats->rx_bytes; |
| 1141 | this_tot->vf_tx_bytes = stats->tx_bytes; |
| 1142 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); |
| 1143 | this_tot->rx_packets = this_tot->vf_rx_packets; |
| 1144 | this_tot->tx_packets = this_tot->vf_tx_packets; |
| 1145 | this_tot->rx_bytes = this_tot->vf_rx_bytes; |
| 1146 | this_tot->tx_bytes = this_tot->vf_tx_bytes; |
| 1147 | } |
| 1148 | |
| 1149 | 	/* fetch per-channel netvsc stats, accumulated by each channel's target CPU */
| 1150 | for (i = 0; i < nvdev->num_chn; i++) { |
| 1151 | const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; |
| 1152 | const struct netvsc_stats *stats; |
| 1153 | struct netvsc_ethtool_pcpu_stats *this_tot = |
| 1154 | &pcpu_tot[nvchan->channel->target_cpu]; |
| 1155 | u64 packets, bytes; |
| 1156 | unsigned int start; |
| 1157 | |
| 1158 | stats = &nvchan->tx_stats; |
| 1159 | do { |
| 1160 | start = u64_stats_fetch_begin_irq(&stats->syncp); |
| 1161 | packets = stats->packets; |
| 1162 | bytes = stats->bytes; |
| 1163 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); |
| 1164 | |
| 1165 | this_tot->tx_bytes += bytes; |
| 1166 | this_tot->tx_packets += packets; |
| 1167 | |
| 1168 | stats = &nvchan->rx_stats; |
| 1169 | do { |
| 1170 | start = u64_stats_fetch_begin_irq(&stats->syncp); |
| 1171 | packets = stats->packets; |
| 1172 | bytes = stats->bytes; |
| 1173 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); |
| 1174 | |
| 1175 | this_tot->rx_bytes += bytes; |
| 1176 | this_tot->rx_packets += packets; |
| 1177 | } |
| 1178 | } |
| 1179 | |
stephen hemminger | bc1f447 | 2017-01-06 19:12:52 -0800 | [diff] [blame] | 1180 | static void netvsc_get_stats64(struct net_device *net, |
| 1181 | struct rtnl_link_stats64 *t) |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1182 | { |
| 1183 | struct net_device_context *ndev_ctx = netdev_priv(net); |
stephen hemminger | 776e726 | 2017-04-14 14:42:57 -0700 | [diff] [blame] | 1184 | struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1185 | struct netvsc_vf_pcpu_stats vf_tot; |
stephen hemminger | 89bb42b | 2017-08-09 17:46:08 -0700 | [diff] [blame] | 1186 | int i; |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1187 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1188 | if (!nvdev) |
| 1189 | return; |
| 1190 | |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1191 | netdev_stats_to_stats64(t, &net->stats); |
| 1192 | |
| 1193 | netvsc_get_vf_stats(net, &vf_tot); |
| 1194 | t->rx_packets += vf_tot.rx_packets; |
| 1195 | t->tx_packets += vf_tot.tx_packets; |
| 1196 | t->rx_bytes += vf_tot.rx_bytes; |
| 1197 | t->tx_bytes += vf_tot.tx_bytes; |
| 1198 | t->tx_dropped += vf_tot.tx_dropped; |
| 1199 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1200 | for (i = 0; i < nvdev->num_chn; i++) { |
| 1201 | const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; |
| 1202 | const struct netvsc_stats *stats; |
| 1203 | u64 packets, bytes, multicast; |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1204 | unsigned int start; |
| 1205 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1206 | stats = &nvchan->tx_stats; |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1207 | do { |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1208 | start = u64_stats_fetch_begin_irq(&stats->syncp); |
| 1209 | packets = stats->packets; |
| 1210 | bytes = stats->bytes; |
| 1211 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1212 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1213 | t->tx_bytes += bytes; |
| 1214 | t->tx_packets += packets; |
| 1215 | |
| 1216 | stats = &nvchan->rx_stats; |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1217 | do { |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1218 | start = u64_stats_fetch_begin_irq(&stats->syncp); |
| 1219 | packets = stats->packets; |
| 1220 | bytes = stats->bytes; |
| 1221 | multicast = stats->multicast + stats->broadcast; |
| 1222 | } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1223 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1224 | t->rx_bytes += bytes; |
| 1225 | t->rx_packets += packets; |
| 1226 | t->multicast += multicast; |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1227 | } |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1228 | } |
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1229 | |
| 1230 | static int netvsc_set_mac_addr(struct net_device *ndev, void *p) |
| 1231 | { |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1232 | struct net_device_context *ndc = netdev_priv(ndev); |
stephen hemminger | 16ba326 | 2017-08-09 17:46:05 -0700 | [diff] [blame] | 1233 | struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1234 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1235 | struct sockaddr *addr = p; |
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1236 | int err; |
| 1237 | |
stephen hemminger | 16ba326 | 2017-08-09 17:46:05 -0700 | [diff] [blame] | 1238 | err = eth_prepare_mac_addr_change(ndev, p); |
| 1239 | if (err) |
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1240 | return err; |
| 1241 | |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1242 | if (!nvdev) |
| 1243 | return -ENODEV; |
| 1244 | |
stephen hemminger | 16ba326 | 2017-08-09 17:46:05 -0700 | [diff] [blame] | 1245 | if (vf_netdev) { |
| 1246 | err = dev_set_mac_address(vf_netdev, addr); |
| 1247 | if (err) |
| 1248 | return err; |
| 1249 | } |
| 1250 | |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1251 | err = rndis_filter_set_device_mac(nvdev, addr->sa_data); |
stephen hemminger | 16ba326 | 2017-08-09 17:46:05 -0700 | [diff] [blame] | 1252 | if (!err) { |
| 1253 | eth_commit_mac_addr_change(ndev, p); |
| 1254 | } else if (vf_netdev) { |
| 1255 | /* rollback change on VF */ |
| 1256 | memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN); |
| 1257 | dev_set_mac_address(vf_netdev, addr); |
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1258 | } |
| 1259 | |
| 1260 | return err; |
| 1261 | } |
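| | 
| | /* MAC changes follow the usual prepare/commit pattern: validate with
| |  * eth_prepare_mac_addr_change(), apply to the slave VF first, then to the
| |  * synthetic device, and commit to ndev->dev_addr only on success; on RNDIS
| |  * failure the VF is rolled back to the previous address.
| |  */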
| 1262 | |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1263 | static const struct { |
| 1264 | char name[ETH_GSTRING_LEN]; |
| 1265 | u16 offset; |
| 1266 | } netvsc_stats[] = { |
| 1267 | { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, |
Stephen Hemminger | f61a9d6 | 2017-12-12 16:48:36 -0800 | [diff] [blame] | 1268 | { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1269 | { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, |
| 1270 | { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, |
| 1271 | { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, |
stephen hemminger | cad5c19 | 2017-08-09 17:46:12 -0700 | [diff] [blame] | 1272 | { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, |
| 1273 | { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, |
Stephen Hemminger | f61a9d6 | 2017-12-12 16:48:36 -0800 | [diff] [blame] | 1274 | { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, |
Simon Xiao | 09af87d | 2017-09-29 11:39:46 -0700 | [diff] [blame] | 1275 | { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, |
| 1276 | { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1277 | }, pcpu_stats[] = { |
| 1278 | { "cpu%u_rx_packets", |
| 1279 | offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) }, |
| 1280 | { "cpu%u_rx_bytes", |
| 1281 | offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) }, |
| 1282 | { "cpu%u_tx_packets", |
| 1283 | offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) }, |
| 1284 | { "cpu%u_tx_bytes", |
| 1285 | offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) }, |
| 1286 | { "cpu%u_vf_rx_packets", |
| 1287 | offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) }, |
| 1288 | { "cpu%u_vf_rx_bytes", |
| 1289 | offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) }, |
| 1290 | { "cpu%u_vf_tx_packets", |
| 1291 | offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) }, |
| 1292 | { "cpu%u_vf_tx_bytes", |
| 1293 | offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) }, |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1294 | }, vf_stats[] = { |
| 1295 | { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, |
| 1296 | { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, |
| 1297 | { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, |
| 1298 | { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, |
| 1299 | { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1300 | }; |
| 1301 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1302 | #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1303 | #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1304 | |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1305 | /* statistics per CPU (rx/tx packets/bytes) */
| 1306 | #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) |
| 1307 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1308 | /* 4 statistics per queue (rx/tx packets/bytes) */ |
| 1309 | #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) |
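| | 
| | /* The data[] layout written by netvsc_get_ethtool_stats() must match both
| |  * the count from netvsc_get_sset_count() and the string order in
| |  * netvsc_get_strings(): global stats, then VF totals, then per-queue tx/rx
| |  * pairs, then the per-CPU block.
| |  */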
| 1310 | |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1311 | static int netvsc_get_sset_count(struct net_device *dev, int string_set) |
| 1312 | { |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1313 | struct net_device_context *ndc = netdev_priv(dev); |
stephen hemminger | fbd4c7e | 2017-06-07 15:53:47 -0700 | [diff] [blame] | 1314 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1315 | |
| 1316 | if (!nvdev) |
| 1317 | return -ENODEV; |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1318 | |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1319 | switch (string_set) { |
| 1320 | case ETH_SS_STATS: |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1321 | return NETVSC_GLOBAL_STATS_LEN |
| 1322 | + NETVSC_VF_STATS_LEN |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1323 | + NETVSC_QUEUE_STATS_LEN(nvdev) |
| 1324 | + NETVSC_PCPU_STATS_LEN; |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1325 | default: |
| 1326 | return -EINVAL; |
| 1327 | } |
| 1328 | } |
| 1329 | |
| 1330 | static void netvsc_get_ethtool_stats(struct net_device *dev, |
| 1331 | struct ethtool_stats *stats, u64 *data) |
| 1332 | { |
| 1333 | struct net_device_context *ndc = netdev_priv(dev); |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1334 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1335 | const void *nds = &ndc->eth_stats; |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1336 | const struct netvsc_stats *qstats; |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1337 | struct netvsc_vf_pcpu_stats sum; |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1338 | struct netvsc_ethtool_pcpu_stats *pcpu_sum; |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1339 | unsigned int start; |
| 1340 | u64 packets, bytes; |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1341 | int i, j, cpu; |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1342 | |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1343 | if (!nvdev) |
| 1344 | return; |
| 1345 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1346 | for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1347 | data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1348 | |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1349 | netvsc_get_vf_stats(dev, &sum); |
| 1350 | for (j = 0; j < NETVSC_VF_STATS_LEN; j++) |
| 1351 | data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); |
| 1352 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1353 | for (j = 0; j < nvdev->num_chn; j++) { |
| 1354 | qstats = &nvdev->chan_table[j].tx_stats; |
| 1355 | |
| 1356 | do { |
| 1357 | start = u64_stats_fetch_begin_irq(&qstats->syncp); |
| 1358 | packets = qstats->packets; |
| 1359 | bytes = qstats->bytes; |
| 1360 | } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); |
| 1361 | data[i++] = packets; |
| 1362 | data[i++] = bytes; |
| 1363 | |
| 1364 | qstats = &nvdev->chan_table[j].rx_stats; |
| 1365 | do { |
| 1366 | start = u64_stats_fetch_begin_irq(&qstats->syncp); |
| 1367 | packets = qstats->packets; |
| 1368 | bytes = qstats->bytes; |
| 1369 | } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); |
| 1370 | data[i++] = packets; |
| 1371 | data[i++] = bytes; |
| 1372 | } |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1373 | |
| 1374 | 	pcpu_sum = kvmalloc_array(num_possible_cpus(),
| 1375 | 				  sizeof(struct netvsc_ethtool_pcpu_stats),
| 1376 | 				  GFP_KERNEL);
| | 	if (!pcpu_sum)
| | 		return;
| | 
| 1377 | 	netvsc_get_pcpu_stats(dev, pcpu_sum);
| 1378 | for_each_present_cpu(cpu) { |
| 1379 | struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; |
| 1380 | |
| 1381 | for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++) |
| 1382 | data[i++] = *(u64 *)((void *)this_sum |
| 1383 | + pcpu_stats[j].offset); |
| 1384 | } |
| 1385 | kvfree(pcpu_sum); |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1386 | } |
| 1387 | |
| 1388 | static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
| 1389 | { |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1390 | struct net_device_context *ndc = netdev_priv(dev); |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1391 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1392 | u8 *p = data; |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1393 | int i, cpu; |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1394 | |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1395 | if (!nvdev) |
| 1396 | return; |
| 1397 | |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1398 | switch (stringset) { |
| 1399 | case ETH_SS_STATS: |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1400 | for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { |
| 1401 | memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); |
| 1402 | p += ETH_GSTRING_LEN; |
| 1403 | } |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1404 | |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1405 | for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { |
| 1406 | memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); |
| 1407 | p += ETH_GSTRING_LEN; |
| 1408 | } |
| 1409 | |
Simon Xiao | 6c80f3f | 2017-01-24 13:06:13 -0800 | [diff] [blame] | 1410 | for (i = 0; i < nvdev->num_chn; i++) { |
| 1411 | sprintf(p, "tx_queue_%u_packets", i); |
| 1412 | p += ETH_GSTRING_LEN; |
| 1413 | sprintf(p, "tx_queue_%u_bytes", i); |
| 1414 | p += ETH_GSTRING_LEN; |
| 1415 | sprintf(p, "rx_queue_%u_packets", i); |
| 1416 | p += ETH_GSTRING_LEN; |
| 1417 | sprintf(p, "rx_queue_%u_bytes", i); |
| 1418 | p += ETH_GSTRING_LEN; |
| 1419 | } |
| 1420 | |
Yidong Ren | 6ae7467 | 2018-07-30 17:09:45 +0000 | [diff] [blame] | 1421 | for_each_present_cpu(cpu) { |
| 1422 | for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) { |
| 1423 | sprintf(p, pcpu_stats[i].name, cpu); |
| 1424 | p += ETH_GSTRING_LEN; |
| 1425 | } |
| 1426 | } |
| 1427 | |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1428 | break; |
| 1429 | } |
| 1430 | } |
| 1431 | |
stephen hemminger | b448f4e | 2017-01-24 13:06:00 -0800 | [diff] [blame] | 1432 | static int |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1433 | netvsc_get_rss_hash_opts(struct net_device_context *ndc, |
| 1434 | struct ethtool_rxnfc *info) |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1435 | { |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1436 | const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3; |
| 1437 | |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1438 | info->data = RXH_IP_SRC | RXH_IP_DST; |
| 1439 | |
| 1440 | switch (info->flow_type) { |
| 1441 | case TCP_V4_FLOW: |
Haiyang Zhang | 0518ec4f | 2017-10-06 08:33:58 -0700 | [diff] [blame] | 1442 | if (ndc->l4_hash & HV_TCP4_L4HASH) |
| 1443 | info->data |= l4_flag; |
| 1444 | |
| 1445 | break; |
| 1446 | |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1447 | case TCP_V6_FLOW: |
Haiyang Zhang | 0518ec4f | 2017-10-06 08:33:58 -0700 | [diff] [blame] | 1448 | if (ndc->l4_hash & HV_TCP6_L4HASH) |
| 1449 | info->data |= l4_flag; |
| 1450 | |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1451 | break; |
| 1452 | |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1453 | case UDP_V4_FLOW: |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1454 | if (ndc->l4_hash & HV_UDP4_L4HASH) |
| 1455 | info->data |= l4_flag; |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1456 | |
| 1457 | break; |
| 1458 | |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1459 | case UDP_V6_FLOW: |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1460 | if (ndc->l4_hash & HV_UDP6_L4HASH) |
| 1461 | info->data |= l4_flag; |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1462 | |
| 1463 | break; |
| 1464 | |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1465 | case IPV4_FLOW: |
| 1466 | case IPV6_FLOW: |
| 1467 | break; |
| 1468 | default: |
| 1469 | info->data = 0; |
| 1470 | break; |
| 1471 | } |
| 1472 | |
| 1473 | return 0; |
| 1474 | } |
| 1475 | |
| 1476 | static int |
stephen hemminger | b448f4e | 2017-01-24 13:06:00 -0800 | [diff] [blame] | 1477 | netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, |
| 1478 | u32 *rules) |
| 1479 | { |
| 1480 | struct net_device_context *ndc = netdev_priv(dev); |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1481 | struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1482 | |
| 1483 | if (!nvdev) |
| 1484 | return -ENODEV; |
stephen hemminger | b448f4e | 2017-01-24 13:06:00 -0800 | [diff] [blame] | 1485 | |
| 1486 | switch (info->cmd) { |
| 1487 | case ETHTOOL_GRXRINGS: |
| 1488 | info->data = nvdev->num_chn; |
| 1489 | return 0; |
stephen hemminger | b5a5dc8 | 2017-01-24 13:06:01 -0800 | [diff] [blame] | 1490 | |
| 1491 | case ETHTOOL_GRXFH: |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1492 | return netvsc_get_rss_hash_opts(ndc, info); |
stephen hemminger | b448f4e | 2017-01-24 13:06:00 -0800 | [diff] [blame] | 1493 | } |
| 1494 | return -EOPNOTSUPP; |
| 1495 | } |
| 1496 | |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1497 | static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, |
| 1498 | struct ethtool_rxnfc *info) |
| 1499 | { |
| 1500 | if (info->data == (RXH_IP_SRC | RXH_IP_DST | |
| 1501 | RXH_L4_B_0_1 | RXH_L4_B_2_3)) { |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1502 | switch (info->flow_type) { |
Haiyang Zhang | 0518ec4f | 2017-10-06 08:33:58 -0700 | [diff] [blame] | 1503 | case TCP_V4_FLOW: |
| 1504 | ndc->l4_hash |= HV_TCP4_L4HASH; |
| 1505 | break; |
| 1506 | |
| 1507 | case TCP_V6_FLOW: |
| 1508 | ndc->l4_hash |= HV_TCP6_L4HASH; |
| 1509 | break; |
| 1510 | |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1511 | case UDP_V4_FLOW: |
| 1512 | ndc->l4_hash |= HV_UDP4_L4HASH; |
| 1513 | break; |
| 1514 | |
| 1515 | case UDP_V6_FLOW: |
| 1516 | ndc->l4_hash |= HV_UDP6_L4HASH; |
| 1517 | break; |
| 1518 | |
| 1519 | default: |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1520 | return -EOPNOTSUPP; |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1521 | } |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1522 | |
| 1523 | return 0; |
| 1524 | } |
| 1525 | |
| 1526 | if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1527 | switch (info->flow_type) { |
Haiyang Zhang | 0518ec4f | 2017-10-06 08:33:58 -0700 | [diff] [blame] | 1528 | case TCP_V4_FLOW: |
| 1529 | ndc->l4_hash &= ~HV_TCP4_L4HASH; |
| 1530 | break; |
| 1531 | |
| 1532 | case TCP_V6_FLOW: |
| 1533 | ndc->l4_hash &= ~HV_TCP6_L4HASH; |
| 1534 | break; |
| 1535 | |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1536 | case UDP_V4_FLOW: |
| 1537 | ndc->l4_hash &= ~HV_UDP4_L4HASH; |
| 1538 | break; |
| 1539 | |
| 1540 | case UDP_V6_FLOW: |
| 1541 | ndc->l4_hash &= ~HV_UDP6_L4HASH; |
| 1542 | break; |
| 1543 | |
| 1544 | default: |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1545 | return -EOPNOTSUPP; |
Haiyang Zhang | 486e398 | 2017-10-06 08:33:57 -0700 | [diff] [blame] | 1546 | } |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1547 | |
| 1548 | return 0; |
| 1549 | } |
| 1550 | |
| 1551 | return -EOPNOTSUPP; |
| 1552 | } |
| 1553 | |
| 1554 | static int |
| 1555 | netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) |
| 1556 | { |
| 1557 | struct net_device_context *ndc = netdev_priv(ndev); |
| 1558 | |
| 1559 | if (info->cmd == ETHTOOL_SRXFH) |
| 1560 | return netvsc_set_rss_hash_opts(ndc, info); |
| 1561 | |
| 1562 | return -EOPNOTSUPP; |
| 1563 | } |
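| | 
| | /* From userspace these hooks map onto "ethtool -N <dev> rx-flow-hash ...";
| |  * e.g. "ethtool -N eth0 rx-flow-hash udp4 sdfn" would request 4-tuple
| |  * hashing for UDP over IPv4 ("sd" = IP addresses, "fn" = L4 ports).
| |  */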
| 1564 | |
Richard Weinberger | 316158f | 2014-07-09 16:23:59 +0200 | [diff] [blame] | 1565 | #ifdef CONFIG_NET_POLL_CONTROLLER |
stephen hemminger | a5ecd43 | 2017-06-07 15:53:48 -0700 | [diff] [blame] | 1566 | static void netvsc_poll_controller(struct net_device *dev) |
Richard Weinberger | 316158f | 2014-07-09 16:23:59 +0200 | [diff] [blame] | 1567 | { |
stephen hemminger | a5ecd43 | 2017-06-07 15:53:48 -0700 | [diff] [blame] | 1568 | struct net_device_context *ndc = netdev_priv(dev); |
| 1569 | struct netvsc_device *ndev; |
| 1570 | int i; |
| 1571 | |
| 1572 | rcu_read_lock(); |
| 1573 | ndev = rcu_dereference(ndc->nvdev); |
| 1574 | if (ndev) { |
| 1575 | for (i = 0; i < ndev->num_chn; i++) { |
| 1576 | struct netvsc_channel *nvchan = &ndev->chan_table[i]; |
| 1577 | |
| 1578 | napi_schedule(&nvchan->napi); |
| 1579 | } |
| 1580 | } |
| 1581 | rcu_read_unlock(); |
Richard Weinberger | 316158f | 2014-07-09 16:23:59 +0200 | [diff] [blame] | 1582 | } |
| 1583 | #endif |
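| | 
| | /* Netpoll has no channel context of its own, so the controller above simply
| |  * schedules NAPI on every channel; the actual ring processing happens in the
| |  * regular per-channel poll handlers.
| |  */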
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1584 | |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1585 | static u32 netvsc_get_rxfh_key_size(struct net_device *dev) |
| 1586 | { |
| 1587 | return NETVSC_HASH_KEYLEN; |
| 1588 | } |
| 1589 | |
| 1590 | static u32 netvsc_rss_indir_size(struct net_device *dev) |
| 1591 | { |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1592 | return ITAB_NUM; |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1593 | } |
| 1594 | |
| 1595 | static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, |
| 1596 | u8 *hfunc) |
| 1597 | { |
| 1598 | struct net_device_context *ndc = netdev_priv(dev); |
stephen hemminger | 867047c | 2017-07-28 08:59:42 -0700 | [diff] [blame] | 1599 | struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); |
Colin Ian King | eb996ed | 2017-03-25 14:26:39 +0000 | [diff] [blame] | 1600 | struct rndis_device *rndis_dev; |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1601 | int i; |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1602 | |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1603 | if (!ndev) |
| 1604 | return -ENODEV; |
| 1605 | |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1606 | if (hfunc) |
| 1607 | *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ |
| 1608 | |
Colin Ian King | eb996ed | 2017-03-25 14:26:39 +0000 | [diff] [blame] | 1609 | rndis_dev = ndev->extension; |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1610 | if (indir) { |
| 1611 | for (i = 0; i < ITAB_NUM; i++) |
Haiyang Zhang | 47371300 | 2017-10-13 12:28:03 -0700 | [diff] [blame] | 1612 | indir[i] = rndis_dev->rx_table[i]; |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1613 | } |
| 1614 | |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1615 | if (key) |
| 1616 | memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN); |
| 1617 | |
| 1618 | return 0; |
| 1619 | } |
| 1620 | |
| 1621 | static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, |
| 1622 | const u8 *key, const u8 hfunc) |
| 1623 | { |
| 1624 | struct net_device_context *ndc = netdev_priv(dev); |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1625 | struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); |
Colin Ian King | eb996ed | 2017-03-25 14:26:39 +0000 | [diff] [blame] | 1626 | struct rndis_device *rndis_dev; |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1627 | int i; |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1628 | |
stephen hemminger | 545a8e7 | 2017-03-22 14:51:00 -0700 | [diff] [blame] | 1629 | if (!ndev) |
| 1630 | return -ENODEV; |
| 1631 | |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1632 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) |
| 1633 | return -EOPNOTSUPP; |
| 1634 | |
Colin Ian King | eb996ed | 2017-03-25 14:26:39 +0000 | [diff] [blame] | 1635 | rndis_dev = ndev->extension; |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1636 | if (indir) { |
| 1637 | for (i = 0; i < ITAB_NUM; i++) |
Haiyang Zhang | db3cd7a | 2017-09-01 14:30:07 -0700 | [diff] [blame] | 1638 | if (indir[i] >= ndev->num_chn) |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1639 | return -EINVAL; |
| 1640 | |
| 1641 | for (i = 0; i < ITAB_NUM; i++) |
Haiyang Zhang | 47371300 | 2017-10-13 12:28:03 -0700 | [diff] [blame] | 1642 | rndis_dev->rx_table[i] = indir[i]; |
stephen hemminger | ff4a441 | 2017-01-24 13:06:04 -0800 | [diff] [blame] | 1643 | } |
| 1644 | |
| 1645 | if (!key) { |
| 1646 | if (!indir) |
| 1647 | return 0; |
| 1648 | |
| 1649 | key = rndis_dev->rss_key; |
| 1650 | } |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1651 | |
Haiyang Zhang | 715e2ec | 2017-09-01 14:30:04 -0700 | [diff] [blame] | 1652 | return rndis_filter_set_rss_param(rndis_dev, key); |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1653 | } |
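| | 
| | /* Each indirection-table entry selects a receive channel, which is why the
| |  * entries are bounded by num_chn above; a NULL key keeps the current
| |  * Toeplitz key so the table can be updated on its own.
| |  */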
| 1654 | |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1655 | /* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
| 1656 |  * It does have a pre-allocated receive area which is divided into sections.
| 1657 | */ |
| 1658 | static void __netvsc_get_ringparam(struct netvsc_device *nvdev, |
| 1659 | struct ethtool_ringparam *ring) |
| 1660 | { |
| 1661 | u32 max_buf_size; |
| 1662 | |
| 1663 | ring->rx_pending = nvdev->recv_section_cnt; |
| 1664 | ring->tx_pending = nvdev->send_section_cnt; |
| 1665 | |
| 1666 | if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) |
| 1667 | max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; |
| 1668 | else |
| 1669 | max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; |
| 1670 | |
| 1671 | ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; |
| 1672 | ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE |
| 1673 | / nvdev->send_section_size; |
| 1674 | } |
| 1675 | |
| 1676 | static void netvsc_get_ringparam(struct net_device *ndev, |
| 1677 | struct ethtool_ringparam *ring) |
| 1678 | { |
| 1679 | struct net_device_context *ndevctx = netdev_priv(ndev); |
| 1680 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
| 1681 | |
| 1682 | if (!nvdev) |
| 1683 | return; |
| 1684 | |
| 1685 | __netvsc_get_ringparam(nvdev, ring); |
| 1686 | } |
| 1687 | |
| 1688 | static int netvsc_set_ringparam(struct net_device *ndev, |
| 1689 | struct ethtool_ringparam *ring) |
| 1690 | { |
| 1691 | struct net_device_context *ndevctx = netdev_priv(ndev); |
| 1692 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1693 | struct netvsc_device_info device_info; |
| 1694 | struct ethtool_ringparam orig; |
| 1695 | u32 new_tx, new_rx; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1696 | int ret = 0; |
| 1697 | |
| 1698 | if (!nvdev || nvdev->destroy) |
| 1699 | return -ENODEV; |
| 1700 | |
| 1701 | memset(&orig, 0, sizeof(orig)); |
| 1702 | __netvsc_get_ringparam(nvdev, &orig); |
| 1703 | |
| 1704 | new_tx = clamp_t(u32, ring->tx_pending, |
| 1705 | NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); |
| 1706 | new_rx = clamp_t(u32, ring->rx_pending, |
| 1707 | NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); |
| 1708 | |
| 1709 | if (new_tx == orig.tx_pending && |
| 1710 | new_rx == orig.rx_pending) |
| 1711 | return 0; /* no change */ |
| 1712 | |
| 1713 | memset(&device_info, 0, sizeof(device_info)); |
| 1714 | device_info.num_chn = nvdev->num_chn; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1715 | device_info.send_sections = new_tx; |
Alex Ng | 0ab09be | 2017-09-20 11:17:35 -0700 | [diff] [blame] | 1716 | device_info.send_section_size = nvdev->send_section_size; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1717 | device_info.recv_sections = new_rx; |
Alex Ng | 0ab09be | 2017-09-20 11:17:35 -0700 | [diff] [blame] | 1718 | device_info.recv_section_size = nvdev->recv_section_size; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1719 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1720 | ret = netvsc_detach(ndev, nvdev); |
| 1721 | if (ret) |
| 1722 | return ret; |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1723 | |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1724 | ret = netvsc_attach(ndev, &device_info); |
| 1725 | if (ret) { |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1726 | device_info.send_sections = orig.tx_pending; |
| 1727 | device_info.recv_sections = orig.rx_pending; |
Stephen Hemminger | 7b2ee50 | 2018-03-20 15:03:05 -0700 | [diff] [blame] | 1728 | |
| 1729 | if (netvsc_attach(ndev, &device_info)) |
| 1730 | 			netdev_err(ndev, "restoring ringparam failed\n");
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1731 | } |
| 1732 | |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1733 | return ret; |
| 1734 | } |
| 1735 | |
Haiyang Zhang | 273de02 | 2018-05-22 11:29:34 -0700 | [diff] [blame] | 1736 | static u32 netvsc_get_msglevel(struct net_device *ndev) |
| 1737 | { |
| 1738 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
| 1739 | |
| 1740 | return ndev_ctx->msg_enable; |
| 1741 | } |
| 1742 | |
| 1743 | static void netvsc_set_msglevel(struct net_device *ndev, u32 val) |
| 1744 | { |
| 1745 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
| 1746 | |
| 1747 | ndev_ctx->msg_enable = val; |
| 1748 | } |
| 1749 | |
Stephen Hemminger | f82f4ad | 2010-05-04 09:58:57 -0700 | [diff] [blame] | 1750 | static const struct ethtool_ops ethtool_ops = { |
| 1751 | .get_drvinfo = netvsc_get_drvinfo, |
Haiyang Zhang | 273de02 | 2018-05-22 11:29:34 -0700 | [diff] [blame] | 1752 | .get_msglevel = netvsc_get_msglevel, |
| 1753 | .set_msglevel = netvsc_set_msglevel, |
Stephen Hemminger | f82f4ad | 2010-05-04 09:58:57 -0700 | [diff] [blame] | 1754 | .get_link = ethtool_op_get_link, |
Stephen Hemminger | 4323b47 | 2016-08-23 12:17:57 -0700 | [diff] [blame] | 1755 | .get_ethtool_stats = netvsc_get_ethtool_stats, |
| 1756 | .get_sset_count = netvsc_get_sset_count, |
| 1757 | .get_strings = netvsc_get_strings, |
Andrew Schwartzmeyer | 5999537 | 2015-02-26 16:27:14 -0800 | [diff] [blame] | 1758 | .get_channels = netvsc_get_channels, |
Andrew Schwartzmeyer | b5960e6 | 2015-08-11 17:14:32 -0700 | [diff] [blame] | 1759 | .set_channels = netvsc_set_channels, |
sixiao@microsoft.com | 76d13b5 | 2016-02-17 16:43:59 -0800 | [diff] [blame] | 1760 | .get_ts_info = ethtool_op_get_ts_info, |
stephen hemminger | b448f4e | 2017-01-24 13:06:00 -0800 | [diff] [blame] | 1761 | .get_rxnfc = netvsc_get_rxnfc, |
Haiyang Zhang | 4823eb2 | 2017-08-21 19:22:39 -0700 | [diff] [blame] | 1762 | .set_rxnfc = netvsc_set_rxnfc, |
stephen hemminger | 962f3fe | 2017-01-24 13:06:02 -0800 | [diff] [blame] | 1763 | .get_rxfh_key_size = netvsc_get_rxfh_key_size, |
| 1764 | .get_rxfh_indir_size = netvsc_rss_indir_size, |
| 1765 | .get_rxfh = netvsc_get_rxfh, |
| 1766 | .set_rxfh = netvsc_set_rxfh, |
Philippe Reynes | 5e8456f | 2017-03-08 23:41:04 +0100 | [diff] [blame] | 1767 | .get_link_ksettings = netvsc_get_link_ksettings, |
| 1768 | .set_link_ksettings = netvsc_set_link_ksettings, |
stephen hemminger | 8b53279 | 2017-08-09 17:46:11 -0700 | [diff] [blame] | 1769 | .get_ringparam = netvsc_get_ringparam, |
| 1770 | .set_ringparam = netvsc_set_ringparam, |
Stephen Hemminger | f82f4ad | 2010-05-04 09:58:57 -0700 | [diff] [blame] | 1771 | }; |
| 1772 | |
Greg Kroah-Hartman | df2fff2 | 2009-08-31 21:11:12 -0700 | [diff] [blame] | 1773 | static const struct net_device_ops device_ops = { |
| 1774 | .ndo_open = netvsc_open, |
| 1775 | .ndo_stop = netvsc_close, |
| 1776 | .ndo_start_xmit = netvsc_start_xmit, |
Stephen Hemminger | bee9d41 | 2018-03-02 13:49:09 -0800 | [diff] [blame] | 1777 | .ndo_change_rx_flags = netvsc_change_rx_flags, |
| 1778 | .ndo_set_rx_mode = netvsc_set_rx_mode, |
Haiyang Zhang | 4d447c9 | 2011-12-15 13:45:17 -0800 | [diff] [blame] | 1779 | .ndo_change_mtu = netvsc_change_mtu, |
Haiyang Zhang | b681b58 | 2010-08-03 19:15:31 +0000 | [diff] [blame] | 1780 | .ndo_validate_addr = eth_validate_addr, |
Haiyang Zhang | 1ce09e8 | 2012-07-10 07:19:22 +0000 | [diff] [blame] | 1781 | .ndo_set_mac_address = netvsc_set_mac_addr, |
Haiyang Zhang | 5b54dac | 2014-04-21 10:20:28 -0700 | [diff] [blame] | 1782 | .ndo_select_queue = netvsc_select_queue, |
sixiao@microsoft.com | 7eafd9b | 2015-05-14 01:00:25 -0700 | [diff] [blame] | 1783 | .ndo_get_stats64 = netvsc_get_stats64, |
Richard Weinberger | 316158f | 2014-07-09 16:23:59 +0200 | [diff] [blame] | 1784 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1785 | .ndo_poll_controller = netvsc_poll_controller, |
| 1786 | #endif |
Greg Kroah-Hartman | df2fff2 | 2009-08-31 21:11:12 -0700 | [diff] [blame] | 1787 | }; |
| 1788 | |
Haiyang Zhang | c996edc | 2011-04-06 15:18:00 -0700 | [diff] [blame] | 1789 | /* |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1790 |  * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
| 1791 |  * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT, when a carrier is
| 1792 |  * present, send a GARP packet to network peers with netdev_notify_peers().
Haiyang Zhang | c996edc | 2011-04-06 15:18:00 -0700 | [diff] [blame] | 1793 | */ |
Haiyang Zhang | 891de74 | 2014-02-12 16:54:27 -0800 | [diff] [blame] | 1794 | static void netvsc_link_change(struct work_struct *w) |
Haiyang Zhang | c996edc | 2011-04-06 15:18:00 -0700 | [diff] [blame] | 1795 | { |
Vitaly Kuznetsov | 0a1275c | 2016-05-13 13:55:23 +0200 | [diff] [blame] | 1796 | struct net_device_context *ndev_ctx = |
| 1797 | container_of(w, struct net_device_context, dwork.work); |
| 1798 | struct hv_device *device_obj = ndev_ctx->device_ctx; |
| 1799 | struct net_device *net = hv_get_drvdata(device_obj); |
K. Y. Srinivasan | 2ddd5e5 | 2011-09-13 10:59:49 -0700 | [diff] [blame] | 1800 | struct netvsc_device *net_device; |
Haiyang Zhang | 891de74 | 2014-02-12 16:54:27 -0800 | [diff] [blame] | 1801 | struct rndis_device *rdev; |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1802 | struct netvsc_reconfig *event = NULL; |
| 1803 | bool notify = false, reschedule = false; |
| 1804 | unsigned long flags, next_reconfig, delay; |
Haiyang Zhang | c996edc | 2011-04-06 15:18:00 -0700 | [diff] [blame] | 1805 | |
stephen hemminger | 9b4e946 | 2017-08-24 16:49:16 -0700 | [diff] [blame] | 1806 | 	/* if changes are happening, come back later */
| 1807 | if (!rtnl_trylock()) { |
| 1808 | schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); |
| 1809 | return; |
| 1810 | } |
| 1811 | |
stephen hemminger | a0be450 | 2017-03-22 14:51:01 -0700 | [diff] [blame] | 1812 | net_device = rtnl_dereference(ndev_ctx->nvdev); |
| 1813 | if (!net_device) |
Vitaly Kuznetsov | 1bdcec8 | 2016-05-13 13:55:21 +0200 | [diff] [blame] | 1814 | goto out_unlock; |
| 1815 | |
Haiyang Zhang | 891de74 | 2014-02-12 16:54:27 -0800 | [diff] [blame] | 1816 | rdev = net_device->extension; |
Haiyang Zhang | 891de74 | 2014-02-12 16:54:27 -0800 | [diff] [blame] | 1817 | |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1818 | next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; |
| 1819 | if (time_is_after_jiffies(next_reconfig)) { |
| 1820 | 		/* link_watch only sends one notification with the current state
| 1821 | 		 * per second; avoid doing reconfig more frequently. Handle
| 1822 | 		 * jiffies wrap-around.
| 1823 | */ |
| 1824 | delay = next_reconfig - jiffies; |
| 1825 | delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT; |
| 1826 | schedule_delayed_work(&ndev_ctx->dwork, delay); |
Vitaly Kuznetsov | 1bdcec8 | 2016-05-13 13:55:21 +0200 | [diff] [blame] | 1827 | goto out_unlock; |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1828 | } |
| 1829 | ndev_ctx->last_reconfig = jiffies; |
| 1830 | |
| 1831 | spin_lock_irqsave(&ndev_ctx->lock, flags); |
| 1832 | if (!list_empty(&ndev_ctx->reconfig_events)) { |
| 1833 | event = list_first_entry(&ndev_ctx->reconfig_events, |
| 1834 | struct netvsc_reconfig, list); |
| 1835 | list_del(&event->list); |
| 1836 | reschedule = !list_empty(&ndev_ctx->reconfig_events); |
| 1837 | } |
| 1838 | spin_unlock_irqrestore(&ndev_ctx->lock, flags); |
| 1839 | |
| 1840 | if (!event) |
Vitaly Kuznetsov | 1bdcec8 | 2016-05-13 13:55:21 +0200 | [diff] [blame] | 1841 | goto out_unlock; |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1842 | |
| 1843 | switch (event->event) { |
| 1844 | /* Only the following events are possible due to the check in |
| 1845 | * netvsc_linkstatus_callback() |
| 1846 | */ |
| 1847 | case RNDIS_STATUS_MEDIA_CONNECT: |
| 1848 | if (rdev->link_state) { |
| 1849 | rdev->link_state = false; |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1850 | netif_carrier_on(net); |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1851 | netif_tx_wake_all_queues(net); |
| 1852 | } else { |
| 1853 | notify = true; |
Haiyang Zhang | 3a494e7 | 2014-06-19 18:34:36 -0700 | [diff] [blame] | 1854 | } |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1855 | kfree(event); |
| 1856 | break; |
| 1857 | case RNDIS_STATUS_MEDIA_DISCONNECT: |
| 1858 | if (!rdev->link_state) { |
| 1859 | rdev->link_state = true; |
| 1860 | netif_carrier_off(net); |
| 1861 | netif_tx_stop_all_queues(net); |
| 1862 | } |
| 1863 | kfree(event); |
| 1864 | break; |
| 1865 | case RNDIS_STATUS_NETWORK_CHANGE: |
| 1866 | /* Only makes sense if carrier is present */ |
| 1867 | if (!rdev->link_state) { |
| 1868 | rdev->link_state = true; |
| 1869 | netif_carrier_off(net); |
| 1870 | netif_tx_stop_all_queues(net); |
| 1871 | event->event = RNDIS_STATUS_MEDIA_CONNECT; |
| 1872 | spin_lock_irqsave(&ndev_ctx->lock, flags); |
Haiyang Zhang | 15cfd40 | 2016-04-21 16:13:01 -0700 | [diff] [blame] | 1873 | list_add(&event->list, &ndev_ctx->reconfig_events); |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1874 | spin_unlock_irqrestore(&ndev_ctx->lock, flags); |
| 1875 | reschedule = true; |
| 1876 | } |
| 1877 | break; |
Haiyang Zhang | 891de74 | 2014-02-12 16:54:27 -0800 | [diff] [blame] | 1878 | } |
| 1879 | |
| 1880 | rtnl_unlock(); |
| 1881 | |
| 1882 | if (notify) |
| 1883 | netdev_notify_peers(net); |
Vitaly Kuznetsov | 27a70af | 2015-11-27 11:39:55 +0100 | [diff] [blame] | 1884 | |
| 1885 | /* link_watch only sends one notification with current state per |
| 1886 | * second, handle next reconfig event in 2 seconds. |
| 1887 | */ |
| 1888 | if (reschedule) |
| 1889 | schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); |
Vitaly Kuznetsov | 1bdcec8 | 2016-05-13 13:55:21 +0200 | [diff] [blame] | 1890 | |
| 1891 | return; |
| 1892 | |
| 1893 | out_unlock: |
| 1894 | rtnl_unlock(); |
Haiyang Zhang | c996edc | 2011-04-06 15:18:00 -0700 | [diff] [blame] | 1895 | } |
| 1896 | |
Stephen Hemminger | 8cde8f0 | 2018-06-11 12:44:54 -0700 | [diff] [blame] | 1897 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) |
| 1898 | { |
Stephen Hemminger | 7bf7bb3 | 2018-06-11 12:44:55 -0700 | [diff] [blame] | 1899 | struct net_device_context *net_device_ctx; |
Stephen Hemminger | 8cde8f0 | 2018-06-11 12:44:54 -0700 | [diff] [blame] | 1900 | struct net_device *dev; |
| 1901 | |
Stephen Hemminger | 7bf7bb3 | 2018-06-11 12:44:55 -0700 | [diff] [blame] | 1902 | dev = netdev_master_upper_dev_get(vf_netdev); |
| 1903 | if (!dev || dev->netdev_ops != &device_ops) |
| 1904 | return NULL; /* not a netvsc device */ |
Stephen Hemminger | 8cde8f0 | 2018-06-11 12:44:54 -0700 | [diff] [blame] | 1905 | |
Stephen Hemminger | 7bf7bb3 | 2018-06-11 12:44:55 -0700 | [diff] [blame] | 1906 | net_device_ctx = netdev_priv(dev); |
| 1907 | if (!rtnl_dereference(net_device_ctx->nvdev)) |
| 1908 | return NULL; /* device is removed */ |
Stephen Hemminger | 8cde8f0 | 2018-06-11 12:44:54 -0700 | [diff] [blame] | 1909 | |
Stephen Hemminger | 7bf7bb3 | 2018-06-11 12:44:55 -0700 | [diff] [blame] | 1910 | return dev; |
Stephen Hemminger | 8cde8f0 | 2018-06-11 12:44:54 -0700 | [diff] [blame] | 1911 | } |
| 1912 | |
stephen hemminger | 0c19556 | 2017-08-01 19:58:53 -0700 | [diff] [blame] | 1913 | /* Called when the VF is injecting data into the network stack.
| 1914 |  * Change the associated network device from the VF to netvsc.
| 1915 |  * Note: already called with rcu_read_lock held.
| 1916 | */ |
| 1917 | static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) |
| 1918 | { |
| 1919 | struct sk_buff *skb = *pskb; |
| 1920 | struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); |
| 1921 | struct net_device_context *ndev_ctx = netdev_priv(ndev); |
| 1922 | struct netvsc_vf_pcpu_stats *pcpu_stats |
| 1923 | = this_cpu_ptr(ndev_ctx->vf_stats); |
| 1924 | |
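	/* Make the frame appear to have arrived on the synthetic device
	 * so the stack sees one interface regardless of the data path.
	 */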
	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

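	/* RX_HANDLER_ANOTHER makes the caller reprocess the skb now
	 * that skb->dev points at the synthetic device.
	 */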
	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "cannot register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "cannot set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

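	/* The rest of the VF configuration (netvsc_vf_setup) is deferred
	 * to delayed work so this notifier doesn't recurse into netlink.
	 */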
	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast and other flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

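	/* Don't block on rtnl here: it may be held by a path that is
	 * waiting for this work to finish, so reschedule instead.
	 */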
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

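	/* The slot name is expected to be the VF serial number printed
	 * in decimal, per the pci-hyperv controller's slot naming.
	 */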
	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "invalid VF serial: %s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	netdev_notice(vf_netdev,
		      "no netdev found for VF serial %u\n", serial);
	return NULL;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	int ret;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

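	/* Accept only one VF per netvsc device, and none at all while
	 * the netvsc device itself is being removed.
	 */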
	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* If the synthetic interface is in a different namespace,
	 * move the VF to that namespace; the join will be done
	 * again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

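	/* Hold a reference on the VF for as long as we keep a pointer
	 * to it; dropped in netvsc_unregister_vf().
	 */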
	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected, switch the data path accordingly */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to 1; it may be changed if
	 * more channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	device_info.send_sections = NETVSC_DEFAULT_TX;
	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
	device_info.recv_sections = NETVSC_DEFAULT_RX;
	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* We must take the rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get the rtnl lock first and
	 * wait for all subchannels to show up, but that may never happen:
	 * netvsc_probe() can't get the rtnl lock, and as a result
	 * vmbus_onoffer() -> ... -> device_add() -> ... ->
	 * __device_attach() can't get the device lock, so none of the
	 * subchannels can be processed -- netvsc_subchan_work() then
	 * hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}


static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

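	/* Take rtnl so nvdev can be dereferenced, and stop any pending
	 * subchannel work before tearing the device down.
	 */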
	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid a VLAN dev with the same MAC registering as a VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid a bonding master dev with the same MAC registering as a VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
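	/* The ring_size module parameter is in pages; vmbus expects the
	 * ring buffer size in bytes.
	 */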
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);