blob: 7b18a8c267c2b88d2c5e84bfa87da9691883c154 [file] [log] [blame]
Hank Janssenfceaf242009-07-13 15:34:54 -07001/*
Hank Janssenfceaf242009-07-13 15:34:54 -07002 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
Jeff Kirsheradf8d3f2013-12-06 06:28:47 -080014 * this program; if not, see <http://www.gnu.org/licenses/>.
Hank Janssenfceaf242009-07-13 15:34:54 -070015 *
16 * Authors:
Haiyang Zhangd0e94d12009-11-23 17:00:22 +000017 * Haiyang Zhang <haiyangz@microsoft.com>
Hank Janssenfceaf242009-07-13 15:34:54 -070018 * Hank Janssen <hjanssen@microsoft.com>
Hank Janssenfceaf242009-07-13 15:34:54 -070019 */
Hank Jansseneb335bc2011-03-29 13:58:48 -070020#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
Hank Janssenfceaf242009-07-13 15:34:54 -070022#include <linux/init.h>
K. Y. Srinivasan9079ce62011-06-16 13:16:37 -070023#include <linux/atomic.h>
Hank Janssenfceaf242009-07-13 15:34:54 -070024#include <linux/module.h>
25#include <linux/highmem.h>
26#include <linux/device.h>
Hank Janssenfceaf242009-07-13 15:34:54 -070027#include <linux/io.h>
Hank Janssenfceaf242009-07-13 15:34:54 -070028#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/inetdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
Haiyang Zhangc802db12013-05-28 06:15:56 +000033#include <linux/if_vlan.h>
Hank Janssenfceaf242009-07-13 15:34:54 -070034#include <linux/in.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
stephen hemminger27f5aa92017-07-24 10:57:29 -070036#include <linux/rtnetlink.h>
stephen hemminger0c195562017-08-01 19:58:53 -070037#include <linux/netpoll.h>
stephen hemminger27f5aa92017-07-24 10:57:29 -070038
Hank Janssenfceaf242009-07-13 15:34:54 -070039#include <net/arp.h>
40#include <net/route.h>
41#include <net/sock.h>
42#include <net/pkt_sched.h>
Michael Kelley8eb1b3c2017-05-30 11:36:56 -070043#include <net/checksum.h>
44#include <net/ip6_checksum.h>
Sridhar Samudrala1ff78072018-05-24 09:55:14 -070045#include <net/failover.h>
K. Y. Srinivasan3f335ea2011-05-12 19:34:15 -070046
K. Y. Srinivasan5ca72522011-05-12 19:34:37 -070047#include "hyperv_net.h"
Hank Janssenfceaf242009-07-13 15:34:54 -070048
Stephen Hemminger7b2ee502018-03-20 15:03:05 -070049#define RING_SIZE_MIN 64
50#define RETRY_US_LO 5000
51#define RETRY_US_HI 10000
52#define RETRY_MAX 2000 /* >10 sec */
stephen hemminger8b532792017-08-09 17:46:11 -070053
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +010054#define LINKCHANGE_INT (2 * HZ)
stephen hemminger6123c662017-08-09 17:46:03 -070055#define VF_TAKEOVER_INT (HZ / 10)
stephen hemmingera50af862016-12-06 13:43:54 -080056
Stephen Hemmingera7f99d02017-12-01 11:01:47 -080057static unsigned int ring_size __ro_after_init = 128;
Joe Perchesd61e4032018-03-23 15:54:39 -070058module_param(ring_size, uint, 0444);
Stephen Hemminger450d7a42010-05-04 09:58:53 -070059MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
Stephen Hemmingera7f99d02017-12-01 11:01:47 -080060unsigned int netvsc_ring_bytes __ro_after_init;
Hank Janssenfceaf242009-07-13 15:34:54 -070061
Simon Xiao3f300ff2015-04-28 01:05:17 -070062static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
63 NETIF_MSG_LINK | NETIF_MSG_IFUP |
64 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
65 NETIF_MSG_TX_ERR;
66
67static int debug = -1;
Joe Perchesd61e4032018-03-23 15:54:39 -070068module_param(debug, int, 0444);
Simon Xiao3f300ff2015-04-28 01:05:17 -070069MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
Stephen Hemmingerbee9d412018-03-02 13:49:09 -080071static void netvsc_change_rx_flags(struct net_device *net, int change)
Hank Janssenfceaf242009-07-13 15:34:54 -070072{
Stephen Hemmingerbee9d412018-03-02 13:49:09 -080073 struct net_device_context *ndev_ctx = netdev_priv(net);
74 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
75 int inc;
76
77 if (!vf_netdev)
78 return;
79
80 if (change & IFF_PROMISC) {
81 inc = (net->flags & IFF_PROMISC) ? 1 : -1;
82 dev_set_promiscuity(vf_netdev, inc);
83 }
84
85 if (change & IFF_ALLMULTI) {
86 inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
87 dev_set_allmulti(vf_netdev, inc);
88 }
89}
90
91static void netvsc_set_rx_mode(struct net_device *net)
92{
93 struct net_device_context *ndev_ctx = netdev_priv(net);
Stephen Hemminger35a57b72018-03-07 13:49:11 -080094 struct net_device *vf_netdev;
95 struct netvsc_device *nvdev;
Stephen Hemmingerbee9d412018-03-02 13:49:09 -080096
Stephen Hemminger35a57b72018-03-07 13:49:11 -080097 rcu_read_lock();
98 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
Stephen Hemmingerbee9d412018-03-02 13:49:09 -080099 if (vf_netdev) {
100 dev_uc_sync(vf_netdev, net);
101 dev_mc_sync(vf_netdev, net);
102 }
Haiyang Zhangd426b2e2011-11-30 07:19:08 -0800103
Stephen Hemminger35a57b72018-03-07 13:49:11 -0800104 nvdev = rcu_dereference(ndev_ctx->nvdev);
105 if (nvdev)
106 rndis_filter_update(nvdev);
107 rcu_read_unlock();
Hank Janssenfceaf242009-07-13 15:34:54 -0700108}
109
Hank Janssenfceaf242009-07-13 15:34:54 -0700110static int netvsc_open(struct net_device *net)
111{
Haiyang Zhang53fa1a62017-06-21 16:40:47 -0700112 struct net_device_context *ndev_ctx = netdev_priv(net);
stephen hemminger0c195562017-08-01 19:58:53 -0700113 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
stephen hemminger79e8cbe2017-07-19 11:53:13 -0700114 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
Haiyang Zhang891de742014-02-12 16:54:27 -0800115 struct rndis_device *rdev;
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700116 int ret = 0;
Hank Janssenfceaf242009-07-13 15:34:54 -0700117
Haiyang Zhang891de742014-02-12 16:54:27 -0800118 netif_carrier_off(net);
119
Haiyang Zhangd515d0f2011-09-28 13:24:15 -0700120 /* Open up the device */
Vitaly Kuznetsov2f5fa6c2016-06-03 17:51:00 +0200121 ret = rndis_filter_open(nvdev);
Haiyang Zhangd515d0f2011-09-28 13:24:15 -0700122 if (ret != 0) {
123 netdev_err(net, "unable to open device (ret %d).\n", ret);
124 return ret;
Hank Janssenfceaf242009-07-13 15:34:54 -0700125 }
126
Haiyang Zhang891de742014-02-12 16:54:27 -0800127 rdev = nvdev->extension;
Dexuan Cui52acf732018-06-06 21:32:51 +0000128 if (!rdev->link_state) {
Haiyang Zhang891de742014-02-12 16:54:27 -0800129 netif_carrier_on(net);
Dexuan Cui52acf732018-06-06 21:32:51 +0000130 netif_tx_wake_all_queues(net);
131 }
Haiyang Zhang891de742014-02-12 16:54:27 -0800132
stephen hemminger0c195562017-08-01 19:58:53 -0700133 if (vf_netdev) {
134 /* Setting synthetic device up transparently sets
135 * slave as up. If open fails, then slave will be
136 * still be offline (and not used).
137 */
138 ret = dev_open(vf_netdev);
139 if (ret)
140 netdev_warn(net,
141 "unable to open slave: %s: %d\n",
142 vf_netdev->name, ret);
143 }
144 return 0;
Hank Janssenfceaf242009-07-13 15:34:54 -0700145}
146
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700147static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
Hank Janssenfceaf242009-07-13 15:34:54 -0700148{
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700149 unsigned int retry = 0;
150 int i;
Haiyang Zhang2de85302015-07-13 13:09:16 -0700151
152 /* Ensure pending bytes in ring are read */
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700153 for (;;) {
154 u32 aread = 0;
155
Haiyang Zhang2de85302015-07-13 13:09:16 -0700156 for (i = 0; i < nvdev->num_chn; i++) {
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700157 struct vmbus_channel *chn
158 = nvdev->chan_table[i].channel;
159
Haiyang Zhang2de85302015-07-13 13:09:16 -0700160 if (!chn)
161 continue;
162
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700163 /* make sure receive not running now */
164 napi_synchronize(&nvdev->chan_table[i].napi);
165
stephen hemminger40975962017-06-08 16:21:19 -0700166 aread = hv_get_bytes_to_read(&chn->inbound);
Haiyang Zhang2de85302015-07-13 13:09:16 -0700167 if (aread)
168 break;
169
stephen hemminger40975962017-06-08 16:21:19 -0700170 aread = hv_get_bytes_to_read(&chn->outbound);
Haiyang Zhang2de85302015-07-13 13:09:16 -0700171 if (aread)
172 break;
173 }
174
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700175 if (aread == 0)
176 return 0;
Haiyang Zhang2de85302015-07-13 13:09:16 -0700177
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700178 if (++retry > RETRY_MAX)
179 return -ETIMEDOUT;
Haiyang Zhang2de85302015-07-13 13:09:16 -0700180
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700181 usleep_range(RETRY_US_LO, RETRY_US_HI);
182 }
183}
184
185static int netvsc_close(struct net_device *net)
186{
187 struct net_device_context *net_device_ctx = netdev_priv(net);
188 struct net_device *vf_netdev
189 = rtnl_dereference(net_device_ctx->vf_netdev);
190 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
191 int ret;
192
193 netif_tx_disable(net);
194
195 /* No need to close rndis filter if it is removed already */
196 if (!nvdev)
197 return 0;
198
199 ret = rndis_filter_close(nvdev);
200 if (ret != 0) {
201 netdev_err(net, "unable to close device (ret %d).\n", ret);
202 return ret;
Haiyang Zhang2de85302015-07-13 13:09:16 -0700203 }
204
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700205 ret = netvsc_wait_until_empty(nvdev);
206 if (ret)
Haiyang Zhang2de85302015-07-13 13:09:16 -0700207 netdev_err(net, "Ring buffer not empty after closing rndis\n");
Hank Janssenfceaf242009-07-13 15:34:54 -0700208
stephen hemminger0c195562017-08-01 19:58:53 -0700209 if (vf_netdev)
210 dev_close(vf_netdev);
211
Hank Janssenfceaf242009-07-13 15:34:54 -0700212 return ret;
213}
214
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800215static inline void *init_ppi_data(struct rndis_message *msg,
216 u32 ppi_size, u32 pkt_type)
KY Srinivasan8a002512014-03-08 19:23:14 -0800217{
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800218 struct rndis_packet *rndis_pkt = &msg->msg.pkt;
KY Srinivasan8a002512014-03-08 19:23:14 -0800219 struct rndis_per_packet_info *ppi;
220
KY Srinivasan8a002512014-03-08 19:23:14 -0800221 rndis_pkt->data_offset += ppi_size;
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800222 ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
223 + rndis_pkt->per_pkt_info_len;
KY Srinivasan8a002512014-03-08 19:23:14 -0800224
225 ppi->size = ppi_size;
226 ppi->type = pkt_type;
227 ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
228
229 rndis_pkt->per_pkt_info_len += ppi_size;
230
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800231 return ppi + 1;
KY Srinivasan8a002512014-03-08 19:23:14 -0800232}
233
Haiyang Zhang4823eb22017-08-21 19:22:39 -0700234/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
235 * packets. We can use ethtool to change UDP hash level when necessary.
Haiyang Zhangf72860a2017-04-12 11:45:18 -0700236 */
Haiyang Zhang4823eb22017-08-21 19:22:39 -0700237static inline u32 netvsc_get_hash(
238 struct sk_buff *skb,
239 const struct net_device_context *ndc)
Haiyang Zhangf72860a2017-04-12 11:45:18 -0700240{
241 struct flow_keys flow;
Haiyang Zhang486e3982017-10-06 08:33:57 -0700242 u32 hash, pkt_proto = 0;
Haiyang Zhangf72860a2017-04-12 11:45:18 -0700243 static u32 hashrnd __read_mostly;
244
245 net_get_random_once(&hashrnd, sizeof(hashrnd));
246
247 if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
248 return 0;
249
Haiyang Zhang486e3982017-10-06 08:33:57 -0700250 switch (flow.basic.ip_proto) {
251 case IPPROTO_TCP:
252 if (flow.basic.n_proto == htons(ETH_P_IP))
253 pkt_proto = HV_TCP4_L4HASH;
254 else if (flow.basic.n_proto == htons(ETH_P_IPV6))
255 pkt_proto = HV_TCP6_L4HASH;
256
257 break;
258
259 case IPPROTO_UDP:
260 if (flow.basic.n_proto == htons(ETH_P_IP))
261 pkt_proto = HV_UDP4_L4HASH;
262 else if (flow.basic.n_proto == htons(ETH_P_IPV6))
263 pkt_proto = HV_UDP6_L4HASH;
264
265 break;
266 }
267
268 if (pkt_proto & ndc->l4_hash) {
Haiyang Zhangf72860a2017-04-12 11:45:18 -0700269 return skb_get_hash(skb);
270 } else {
271 if (flow.basic.n_proto == htons(ETH_P_IP))
272 hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
273 else if (flow.basic.n_proto == htons(ETH_P_IPV6))
274 hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
275 else
276 hash = 0;
277
278 skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
279 }
280
281 return hash;
282}
283
Haiyang Zhang8db91f62017-04-12 11:35:05 -0700284static inline int netvsc_get_tx_queue(struct net_device *ndev,
285 struct sk_buff *skb, int old_idx)
286{
287 const struct net_device_context *ndc = netdev_priv(ndev);
288 struct sock *sk = skb->sk;
289 int q_idx;
290
Haiyang Zhang39e91cf2017-10-13 12:28:04 -0700291 q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
292 (VRSS_SEND_TAB_SIZE - 1)];
Haiyang Zhang8db91f62017-04-12 11:35:05 -0700293
294 /* If queue index changed record the new value */
295 if (q_idx != old_idx &&
296 sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
297 sk_tx_queue_set(sk, q_idx);
298
299 return q_idx;
300}
301
stephen hemmingerd8e18ee2017-01-24 13:06:05 -0800302/*
303 * Select queue for transmit.
304 *
305 * If a valid queue has already been assigned, then use that.
306 * Otherwise compute tx queue based on hash and the send table.
307 *
308 * This is basically similar to default (__netdev_pick_tx) with the added step
309 * of using the host send_table when no other queue has been assigned.
310 *
311 * TODO support XPS - but get_xps_queue not exported
312 */
stephen hemminger0c195562017-08-01 19:58:53 -0700313static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
Haiyang Zhang5b54dac2014-04-21 10:20:28 -0700314{
Haiyang Zhang8db91f62017-04-12 11:35:05 -0700315 int q_idx = sk_tx_queue_get(skb->sk);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -0700316
stephen hemminger0c195562017-08-01 19:58:53 -0700317 if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
Haiyang Zhang8db91f62017-04-12 11:35:05 -0700318 /* If forwarding a packet, we use the recorded queue when
319 * available for better cache locality.
320 */
321 if (skb_rx_queue_recorded(skb))
322 q_idx = skb_get_rx_queue(skb);
323 else
324 q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
stephen hemmingerd8e18ee2017-01-24 13:06:05 -0800325 }
326
Haiyang Zhang5b54dac2014-04-21 10:20:28 -0700327 return q_idx;
328}
329
stephen hemminger0c195562017-08-01 19:58:53 -0700330static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
331 void *accel_priv,
332 select_queue_fallback_t fallback)
333{
334 struct net_device_context *ndc = netdev_priv(ndev);
335 struct net_device *vf_netdev;
336 u16 txq;
337
338 rcu_read_lock();
339 vf_netdev = rcu_dereference(ndc->vf_netdev);
340 if (vf_netdev) {
Stephen Hemmingerb3bf5662018-03-02 13:49:07 -0800341 const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
342
343 if (vf_ops->ndo_select_queue)
344 txq = vf_ops->ndo_select_queue(vf_netdev, skb,
345 accel_priv, fallback);
346 else
347 txq = fallback(vf_netdev, skb);
348
349 /* Record the queue selected by VF so that it can be
350 * used for common case where VF has more queues than
351 * the synthetic device.
352 */
353 qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
stephen hemminger0c195562017-08-01 19:58:53 -0700354 } else {
355 txq = netvsc_pick_tx(ndev, skb);
356 }
357 rcu_read_unlock();
358
359 while (unlikely(txq >= ndev->real_num_tx_queues))
360 txq -= ndev->real_num_tx_queues;
361
362 return txq;
363}
364
KY Srinivasan54a73572014-03-08 19:23:13 -0800365static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
stephen hemminger89bb42b2017-08-09 17:46:08 -0700366 struct hv_page_buffer *pb)
KY Srinivasan54a73572014-03-08 19:23:13 -0800367{
368 int j = 0;
369
370 /* Deal with compund pages by ignoring unused part
371 * of the page.
372 */
373 page += (offset >> PAGE_SHIFT);
374 offset &= ~PAGE_MASK;
375
376 while (len > 0) {
377 unsigned long bytes;
378
379 bytes = PAGE_SIZE - offset;
380 if (bytes > len)
381 bytes = len;
382 pb[j].pfn = page_to_pfn(page);
383 pb[j].offset = offset;
384 pb[j].len = bytes;
385
386 offset += bytes;
387 len -= bytes;
388
389 if (offset == PAGE_SIZE && len) {
390 page++;
391 offset = 0;
392 j++;
393 }
394 }
395
396 return j + 1;
397}
398
KY Srinivasan8a002512014-03-08 19:23:14 -0800399static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
KY Srinivasana9f2e2d2015-12-01 16:43:13 -0800400 struct hv_netvsc_packet *packet,
stephen hemminger02b6de02017-07-28 08:59:44 -0700401 struct hv_page_buffer *pb)
KY Srinivasan54a73572014-03-08 19:23:13 -0800402{
403 u32 slots_used = 0;
404 char *data = skb->data;
405 int frags = skb_shinfo(skb)->nr_frags;
406 int i;
407
408 /* The packet is laid out thus:
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -0700409 * 1. hdr: RNDIS header and PPI
KY Srinivasan54a73572014-03-08 19:23:13 -0800410 * 2. skb linear data
411 * 3. skb fragment data
412 */
stephen hemmingerea5a32c2017-08-09 17:46:10 -0700413 slots_used += fill_pg_buf(virt_to_page(hdr),
414 offset_in_page(hdr),
415 len, &pb[slots_used]);
KY Srinivasan54a73572014-03-08 19:23:13 -0800416
Haiyang Zhangaa0a34b2015-04-13 16:34:35 -0700417 packet->rmsg_size = len;
418 packet->rmsg_pgcnt = slots_used;
419
KY Srinivasan54a73572014-03-08 19:23:13 -0800420 slots_used += fill_pg_buf(virt_to_page(data),
421 offset_in_page(data),
422 skb_headlen(skb), &pb[slots_used]);
423
424 for (i = 0; i < frags; i++) {
425 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
426
427 slots_used += fill_pg_buf(skb_frag_page(frag),
428 frag->page_offset,
429 skb_frag_size(frag), &pb[slots_used]);
430 }
KY Srinivasan8a002512014-03-08 19:23:14 -0800431 return slots_used;
KY Srinivasan54a73572014-03-08 19:23:13 -0800432}
433
stephen hemminger80d887d2017-07-24 21:03:19 -0700434static int count_skb_frag_slots(struct sk_buff *skb)
KY Srinivasan54a73572014-03-08 19:23:13 -0800435{
stephen hemminger80d887d2017-07-24 21:03:19 -0700436 int i, frags = skb_shinfo(skb)->nr_frags;
437 int pages = 0;
438
439 for (i = 0; i < frags; i++) {
440 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
441 unsigned long size = skb_frag_size(frag);
442 unsigned long offset = frag->page_offset;
443
444 /* Skip unused frames from start of page */
445 offset &= ~PAGE_MASK;
446 pages += PFN_UP(offset + size);
447 }
448 return pages;
449}
450
451static int netvsc_get_slots(struct sk_buff *skb)
452{
453 char *data = skb->data;
454 unsigned int offset = offset_in_page(data);
455 unsigned int len = skb_headlen(skb);
456 int slots;
457 int frag_slots;
458
459 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
460 frag_slots = count_skb_frag_slots(skb);
461 return slots + frag_slots;
KY Srinivasan54a73572014-03-08 19:23:13 -0800462}
463
stephen hemminger23312a32017-01-24 13:05:59 -0800464static u32 net_checksum_info(struct sk_buff *skb)
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800465{
stephen hemminger23312a32017-01-24 13:05:59 -0800466 if (skb->protocol == htons(ETH_P_IP)) {
467 struct iphdr *ip = ip_hdr(skb);
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800468
stephen hemminger23312a32017-01-24 13:05:59 -0800469 if (ip->protocol == IPPROTO_TCP)
470 return TRANSPORT_INFO_IPV4_TCP;
471 else if (ip->protocol == IPPROTO_UDP)
472 return TRANSPORT_INFO_IPV4_UDP;
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800473 } else {
stephen hemminger23312a32017-01-24 13:05:59 -0800474 struct ipv6hdr *ip6 = ipv6_hdr(skb);
475
476 if (ip6->nexthdr == IPPROTO_TCP)
477 return TRANSPORT_INFO_IPV6_TCP;
Mohammed Gamal37b9dfa2017-07-24 10:57:26 -0700478 else if (ip6->nexthdr == IPPROTO_UDP)
stephen hemminger23312a32017-01-24 13:05:59 -0800479 return TRANSPORT_INFO_IPV6_UDP;
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800480 }
481
stephen hemminger23312a32017-01-24 13:05:59 -0800482 return TRANSPORT_INFO_NOT_IP;
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800483}
484
stephen hemminger0c195562017-08-01 19:58:53 -0700485/* Send skb on the slave VF device. */
486static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
487 struct sk_buff *skb)
488{
489 struct net_device_context *ndev_ctx = netdev_priv(net);
490 unsigned int len = skb->len;
491 int rc;
492
493 skb->dev = vf_netdev;
494 skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
495
496 rc = dev_queue_xmit(skb);
497 if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
498 struct netvsc_vf_pcpu_stats *pcpu_stats
499 = this_cpu_ptr(ndev_ctx->vf_stats);
500
501 u64_stats_update_begin(&pcpu_stats->syncp);
502 pcpu_stats->tx_packets++;
503 pcpu_stats->tx_bytes += len;
504 u64_stats_update_end(&pcpu_stats->syncp);
505 } else {
506 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
507 }
508
509 return rc;
510}
511
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700512static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
Hank Janssenfceaf242009-07-13 15:34:54 -0700513{
Hank Janssenfceaf242009-07-13 15:34:54 -0700514 struct net_device_context *net_device_ctx = netdev_priv(net);
Vitaly Kuznetsov981a1bd2015-04-08 17:54:05 +0200515 struct hv_netvsc_packet *packet = NULL;
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700516 int ret;
KY Srinivasan8a002512014-03-08 19:23:14 -0800517 unsigned int num_data_pgs;
518 struct rndis_message *rndis_msg;
stephen hemminger0c195562017-08-01 19:58:53 -0700519 struct net_device *vf_netdev;
KY Srinivasan8a002512014-03-08 19:23:14 -0800520 u32 rndis_msg_size;
Haiyang Zhang307f0992014-05-21 12:55:39 -0700521 u32 hash;
stephen hemminger02b6de02017-07-28 08:59:44 -0700522 struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
Hank Janssenfceaf242009-07-13 15:34:54 -0700523
stephen hemminger0c195562017-08-01 19:58:53 -0700524 /* if VF is present and up then redirect packets
525 * already called with rcu_read_lock_bh
526 */
527 vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
528 if (vf_netdev && netif_running(vf_netdev) &&
529 !netpoll_tx_running(net))
530 return netvsc_vf_xmit(net, vf_netdev, skb);
531
stephen hemminger80d887d2017-07-24 21:03:19 -0700532 /* We will atmost need two pages to describe the rndis
533 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
Vitaly Kuznetsove88f7e02015-04-08 17:54:06 +0200534 * of pages in a single packet. If skb is scattered around
535 * more pages we try linearizing it.
KY Srinivasan54a73572014-03-08 19:23:13 -0800536 */
stephen hemminger80d887d2017-07-24 21:03:19 -0700537
538 num_data_pgs = netvsc_get_slots(skb) + 2;
539
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700540 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
Stephen Hemminger4323b472016-08-23 12:17:57 -0700541 ++net_device_ctx->eth_stats.tx_scattered;
542
543 if (skb_linearize(skb))
544 goto no_memory;
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700545
stephen hemminger80d887d2017-07-24 21:03:19 -0700546 num_data_pgs = netvsc_get_slots(skb) + 2;
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700547 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
Stephen Hemminger4323b472016-08-23 12:17:57 -0700548 ++net_device_ctx->eth_stats.tx_too_big;
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700549 goto drop;
550 }
KY Srinivasan54a73572014-03-08 19:23:13 -0800551 }
Hank Janssenfceaf242009-07-13 15:34:54 -0700552
KY Srinivasanc0eb4542015-12-01 16:43:10 -0800553 /*
554 * Place the rndis header in the skb head room and
555 * the skb->cb will be used for hv_netvsc_packet
556 * structure.
557 */
558 ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
Stephen Hemminger4323b472016-08-23 12:17:57 -0700559 if (ret)
560 goto no_memory;
561
KY Srinivasanc0eb4542015-12-01 16:43:10 -0800562 /* Use the skb control buffer for building up the packet */
563 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
564 FIELD_SIZEOF(struct sk_buff, cb));
565 packet = (struct hv_netvsc_packet *)skb->cb;
Hank Janssenfceaf242009-07-13 15:34:54 -0700566
Haiyang Zhang5b54dac2014-04-21 10:20:28 -0700567 packet->q_idx = skb_get_queue_mapping(skb);
568
Haiyang Zhang4d447c92011-12-15 13:45:17 -0800569 packet->total_data_buflen = skb->len;
stephen hemminger793e3952017-01-24 13:06:12 -0800570 packet->total_bytes = skb->len;
571 packet->total_packets = 1;
Hank Janssenfceaf242009-07-13 15:34:54 -0700572
KY Srinivasanc0eb4542015-12-01 16:43:10 -0800573 rndis_msg = (struct rndis_message *)skb->head;
KY Srinivasanb08cc792015-03-29 21:08:42 -0700574
KY Srinivasan8a002512014-03-08 19:23:14 -0800575 /* Add the rndis header */
KY Srinivasan8a002512014-03-08 19:23:14 -0800576 rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
577 rndis_msg->msg_len = packet->total_data_buflen;
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800578
579 rndis_msg->msg.pkt = (struct rndis_packet) {
580 .data_offset = sizeof(struct rndis_packet),
581 .data_len = packet->total_data_buflen,
582 .per_pkt_info_offset = sizeof(struct rndis_packet),
583 };
KY Srinivasan8a002512014-03-08 19:23:14 -0800584
585 rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
586
Haiyang Zhang307f0992014-05-21 12:55:39 -0700587 hash = skb_get_hash_raw(skb);
588 if (hash != 0 && net->real_num_tx_queues > 1) {
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800589 u32 *hash_info;
590
Haiyang Zhang307f0992014-05-21 12:55:39 -0700591 rndis_msg_size += NDIS_HASH_PPI_SIZE;
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800592 hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
593 NBL_HASH_VALUE);
594 *hash_info = hash;
Haiyang Zhang307f0992014-05-21 12:55:39 -0700595 }
596
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700597 if (skb_vlan_tag_present(skb)) {
KY Srinivasan8a002512014-03-08 19:23:14 -0800598 struct ndis_pkt_8021q_info *vlan;
599
600 rndis_msg_size += NDIS_VLAN_PPI_SIZE;
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800601 vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
602 IEEE_8021Q_INFO);
stephen hemminger00f50242017-08-09 17:46:09 -0700603
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800604 vlan->value = 0;
KY Srinivasan760d1e32015-12-01 16:43:19 -0800605 vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
606 vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
KY Srinivasan8a002512014-03-08 19:23:14 -0800607 VLAN_PRIO_SHIFT;
608 }
609
stephen hemminger23312a32017-01-24 13:05:59 -0800610 if (skb_is_gso(skb)) {
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700611 struct ndis_tcp_lso_info *lso_info;
612
613 rndis_msg_size += NDIS_LSO_PPI_SIZE;
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800614 lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
615 TCP_LARGESEND_PKTINFO);
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700616
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800617 lso_info->value = 0;
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700618 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
stephen hemminger23312a32017-01-24 13:05:59 -0800619 if (skb->protocol == htons(ETH_P_IP)) {
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700620 lso_info->lso_v2_transmit.ip_version =
621 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
622 ip_hdr(skb)->tot_len = 0;
623 ip_hdr(skb)->check = 0;
624 tcp_hdr(skb)->check =
625 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
626 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
627 } else {
628 lso_info->lso_v2_transmit.ip_version =
629 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
630 ipv6_hdr(skb)->payload_len = 0;
631 tcp_hdr(skb)->check =
632 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
633 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
634 }
stephen hemminger23312a32017-01-24 13:05:59 -0800635 lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700636 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
stephen hemmingerad19bc82016-10-11 14:03:07 -0700637 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
stephen hemminger23312a32017-01-24 13:05:59 -0800638 if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
639 struct ndis_tcp_ip_checksum_info *csum_info;
640
stephen hemmingerad19bc82016-10-11 14:03:07 -0700641 rndis_msg_size += NDIS_CSUM_PPI_SIZE;
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800642 csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
643 TCPIP_CHKSUM_PKTINFO);
stephen hemmingerad19bc82016-10-11 14:03:07 -0700644
Stephen Hemmingerf5a22552017-12-01 11:01:48 -0800645 csum_info->value = 0;
stephen hemminger23312a32017-01-24 13:05:59 -0800646 csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
647
648 if (skb->protocol == htons(ETH_P_IP)) {
stephen hemmingerad19bc82016-10-11 14:03:07 -0700649 csum_info->transmit.is_ipv4 = 1;
stephen hemminger23312a32017-01-24 13:05:59 -0800650
651 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
652 csum_info->transmit.tcp_checksum = 1;
653 else
654 csum_info->transmit.udp_checksum = 1;
655 } else {
stephen hemmingerad19bc82016-10-11 14:03:07 -0700656 csum_info->transmit.is_ipv6 = 1;
657
stephen hemminger23312a32017-01-24 13:05:59 -0800658 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
659 csum_info->transmit.tcp_checksum = 1;
660 else
661 csum_info->transmit.udp_checksum = 1;
662 }
stephen hemmingerad19bc82016-10-11 14:03:07 -0700663 } else {
stephen hemminger23312a32017-01-24 13:05:59 -0800664 /* Can't do offload of this type of checksum */
stephen hemmingerad19bc82016-10-11 14:03:07 -0700665 if (skb_checksum_help(skb))
666 goto drop;
667 }
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700668 }
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800669
KY Srinivasan8a002512014-03-08 19:23:14 -0800670 /* Start filling in the page buffers with the rndis hdr */
671 rndis_msg->msg_len += rndis_msg_size;
Haiyang Zhang942396b2014-10-22 13:47:18 -0700672 packet->total_data_buflen = rndis_msg->msg_len;
KY Srinivasan8a002512014-03-08 19:23:14 -0800673 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
stephen hemminger02b6de02017-07-28 08:59:44 -0700674 skb, packet, pb);
KY Srinivasan8a002512014-03-08 19:23:14 -0800675
sixiao@microsoft.com76d13b52016-02-17 16:43:59 -0800676 /* timestamp packet in software */
677 skb_tx_timestamp(skb);
stephen hemminger2a926f72017-07-19 11:53:17 -0700678
Stephen Hemmingercfd8afd2017-12-12 16:48:40 -0800679 ret = netvsc_send(net, packet, rndis_msg, pb, skb);
stephen hemminger793e3952017-01-24 13:06:12 -0800680 if (likely(ret == 0))
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700681 return NETDEV_TX_OK;
Stephen Hemminger4323b472016-08-23 12:17:57 -0700682
683 if (ret == -EAGAIN) {
684 ++net_device_ctx->eth_stats.tx_busy;
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700685 return NETDEV_TX_BUSY;
Stephen Hemminger4323b472016-08-23 12:17:57 -0700686 }
687
688 if (ret == -ENOSPC)
689 ++net_device_ctx->eth_stats.tx_no_space;
Hank Janssenfceaf242009-07-13 15:34:54 -0700690
Stephen Hemminger0ab05142016-08-23 12:17:52 -0700691drop:
692 dev_kfree_skb_any(skb);
693 net->stats.tx_dropped++;
694
695 return NETDEV_TX_OK;
Stephen Hemminger4323b472016-08-23 12:17:57 -0700696
697no_memory:
698 ++net_device_ctx->eth_stats.tx_no_memory;
699 goto drop;
Hank Janssenfceaf242009-07-13 15:34:54 -0700700}
stephen hemminger89bb42b2017-08-09 17:46:08 -0700701
Hank Janssen3e189512010-03-04 22:11:00 +0000702/*
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700703 * netvsc_linkstatus_callback - Link up/down notification
704 */
Stephen Hemminger79cf1ba2017-12-12 16:48:37 -0800705void netvsc_linkstatus_callback(struct net_device *net,
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700706 struct rndis_message *resp)
Hank Janssenfceaf242009-07-13 15:34:54 -0700707{
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700708 struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
Stephen Hemminger79cf1ba2017-12-12 16:48:37 -0800709 struct net_device_context *ndev_ctx = netdev_priv(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +0100710 struct netvsc_reconfig *event;
711 unsigned long flags;
712
Haiyang Zhang7f5d5af2016-08-04 10:42:15 -0700713 /* Update the physical link speed when changing to another vSwitch */
714 if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
715 u32 speed;
716
stephen hemminger89bb42b2017-08-09 17:46:08 -0700717 speed = *(u32 *)((void *)indicate
718 + indicate->status_buf_offset) / 10000;
Haiyang Zhang7f5d5af2016-08-04 10:42:15 -0700719 ndev_ctx->speed = speed;
720 return;
721 }
722
723 /* Handle these link change statuses below */
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +0100724 if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
725 indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
726 indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
727 return;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -0700728
Haiyang Zhang7f5d5af2016-08-04 10:42:15 -0700729 if (net->reg_state != NETREG_REGISTERED)
Hank Janssenfceaf242009-07-13 15:34:54 -0700730 return;
Hank Janssenfceaf242009-07-13 15:34:54 -0700731
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +0100732 event = kzalloc(sizeof(*event), GFP_ATOMIC);
733 if (!event)
734 return;
735 event->event = indicate->status;
736
737 spin_lock_irqsave(&ndev_ctx->lock, flags);
738 list_add_tail(&event->list, &ndev_ctx->reconfig_events);
739 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
740
741 schedule_delayed_work(&ndev_ctx->dwork, 0);
Hank Janssenfceaf242009-07-13 15:34:54 -0700742}
743
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700744static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
stephen hemmingere91e7dd2017-02-27 10:26:51 -0800745 struct napi_struct *napi,
stephen hemmingerdc54a082017-01-24 13:06:08 -0800746 const struct ndis_tcp_ip_checksum_info *csum_info,
747 const struct ndis_pkt_8021q_info *vlan,
748 void *data, u32 buflen)
Hank Janssenfceaf242009-07-13 15:34:54 -0700749{
Hank Janssenfceaf242009-07-13 15:34:54 -0700750 struct sk_buff *skb;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -0700751
stephen hemmingere91e7dd2017-02-27 10:26:51 -0800752 skb = napi_alloc_skb(napi, buflen);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700753 if (!skb)
754 return skb;
Hank Janssenfceaf242009-07-13 15:34:54 -0700755
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700756 /*
757 * Copy to skb. This copy is needed here since the memory pointed by
758 * hv_netvsc_packet cannot be deallocated
759 */
Johannes Berg59ae1d12017-06-16 14:29:20 +0200760 skb_put_data(skb, data, buflen);
Hank Janssenfceaf242009-07-13 15:34:54 -0700761
762 skb->protocol = eth_type_trans(skb, net);
Stephen Hemmingere52fed72016-10-23 21:32:47 -0700763
764 /* skb is already created with CHECKSUM_NONE */
765 skb_checksum_none_assert(skb);
766
767 /*
768 * In Linux, the IP checksum is always checked.
769 * Do L4 checksum offload if enabled and present.
770 */
771 if (csum_info && (net->features & NETIF_F_RXCSUM)) {
772 if (csum_info->receive.tcp_checksum_succeeded ||
773 csum_info->receive.udp_checksum_succeeded)
KY Srinivasane3d605e2014-03-08 19:23:16 -0800774 skb->ip_summed = CHECKSUM_UNNECESSARY;
KY Srinivasane3d605e2014-03-08 19:23:16 -0800775 }
776
stephen hemmingerdc54a082017-01-24 13:06:08 -0800777 if (vlan) {
778 u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
779
Haiyang Zhang93725cb2013-06-17 15:36:49 -0700780 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
KY Srinivasan760d1e32015-12-01 16:43:19 -0800781 vlan_tci);
stephen hemmingerdc54a082017-01-24 13:06:08 -0800782 }
Hank Janssenfceaf242009-07-13 15:34:54 -0700783
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700784 return skb;
785}
786
787/*
788 * netvsc_recv_callback - Callback when we receive a packet from the
789 * "wire" on the specified device.
790 */
stephen hemmingerdc54a082017-01-24 13:06:08 -0800791int netvsc_recv_callback(struct net_device *net,
Stephen Hemminger345ac082017-12-12 16:48:38 -0800792 struct netvsc_device *net_device,
stephen hemmingerdc54a082017-01-24 13:06:08 -0800793 struct vmbus_channel *channel,
794 void *data, u32 len,
795 const struct ndis_tcp_ip_checksum_info *csum_info,
796 const struct ndis_pkt_8021q_info *vlan)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700797{
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +0200798 struct net_device_context *net_device_ctx = netdev_priv(net);
stephen hemminger742fe542017-02-27 10:26:50 -0800799 u16 q_idx = channel->offermsg.offer.sub_channel_index;
Stephen Hemminger345ac082017-12-12 16:48:38 -0800800 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700801 struct sk_buff *skb;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700802 struct netvsc_stats *rx_stats;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700803
Stephen Hemminger9cbcc422016-09-22 16:56:34 -0700804 if (net->reg_state != NETREG_REGISTERED)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700805 return NVSP_STAT_FAIL;
806
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700807 /* Allocate a skb - TODO direct I/O to pages? */
stephen hemmingere91e7dd2017-02-27 10:26:51 -0800808 skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
809 csum_info, vlan, data, len);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700810 if (unlikely(!skb)) {
Stephen Hemmingerf61a9d62017-12-12 16:48:36 -0800811 ++net_device_ctx->eth_stats.rx_no_memory;
stephen hemminger0719e722017-01-11 09:16:32 -0800812 rcu_read_unlock();
KY Srinivasan84bf9ce2016-04-14 16:31:54 -0700813 return NVSP_STAT_FAIL;
814 }
Haiyang Zhang5b54dac2014-04-21 10:20:28 -0700815
stephen hemminger0c195562017-08-01 19:58:53 -0700816 skb_record_rx_queue(skb, q_idx);
Stephen Hemminger9cbcc422016-09-22 16:56:34 -0700817
818 /*
819 * Even if injecting the packet, record the statistics
820 * on the synthetic device because modifying the VF device
821 * statistics will not work correctly.
822 */
stephen hemminger742fe542017-02-27 10:26:50 -0800823 rx_stats = &nvchan->rx_stats;
sixiao@microsoft.com4b02b582015-05-15 02:33:03 -0700824 u64_stats_update_begin(&rx_stats->syncp);
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -0700825 rx_stats->packets++;
stephen hemmingerdc54a082017-01-24 13:06:08 -0800826 rx_stats->bytes += len;
Stephen Hemmingerf7ad75b2016-09-22 16:56:35 -0700827
828 if (skb->pkt_type == PACKET_BROADCAST)
829 ++rx_stats->broadcast;
830 else if (skb->pkt_type == PACKET_MULTICAST)
831 ++rx_stats->multicast;
sixiao@microsoft.com4b02b582015-05-15 02:33:03 -0700832 u64_stats_update_end(&rx_stats->syncp);
Stephen Hemminger9495c282010-03-09 17:42:17 -0800833
stephen hemminger742fe542017-02-27 10:26:50 -0800834 napi_gro_receive(&nvchan->napi, skb);
Haiyang Zhang5c71dad2018-03-22 12:01:13 -0700835 return NVSP_STAT_SUCCESS;
Hank Janssenfceaf242009-07-13 15:34:54 -0700836}
837
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -0700838static void netvsc_get_drvinfo(struct net_device *net,
839 struct ethtool_drvinfo *info)
840{
Jiri Pirko7826d432013-01-06 00:44:26 +0000841 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
Jiri Pirko7826d432013-01-06 00:44:26 +0000842 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -0700843}
844
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -0800845static void netvsc_get_channels(struct net_device *net,
846 struct ethtool_channels *channel)
847{
848 struct net_device_context *net_device_ctx = netdev_priv(net);
stephen hemminger545a8e72017-03-22 14:51:00 -0700849 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -0800850
851 if (nvdev) {
852 channel->max_combined = nvdev->max_chn;
853 channel->combined_count = nvdev->num_chn;
854 }
855}
856
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700857static int netvsc_detach(struct net_device *ndev,
858 struct netvsc_device *nvdev)
859{
860 struct net_device_context *ndev_ctx = netdev_priv(ndev);
861 struct hv_device *hdev = ndev_ctx->device_ctx;
862 int ret;
863
864 /* Don't try continuing to try and setup sub channels */
865 if (cancel_work_sync(&nvdev->subchan_work))
866 nvdev->num_chn = 1;
867
868 /* If device was up (receiving) then shutdown */
869 if (netif_running(ndev)) {
870 netif_tx_disable(ndev);
871
872 ret = rndis_filter_close(nvdev);
873 if (ret) {
874 netdev_err(ndev,
875 "unable to close device (ret %d).\n", ret);
876 return ret;
877 }
878
879 ret = netvsc_wait_until_empty(nvdev);
880 if (ret) {
881 netdev_err(ndev,
882 "Ring buffer not empty after closing rndis\n");
883 return ret;
884 }
885 }
886
887 netif_device_detach(ndev);
888
889 rndis_filter_device_remove(hdev, nvdev);
890
891 return 0;
892}
893
894static int netvsc_attach(struct net_device *ndev,
895 struct netvsc_device_info *dev_info)
896{
897 struct net_device_context *ndev_ctx = netdev_priv(ndev);
898 struct hv_device *hdev = ndev_ctx->device_ctx;
899 struct netvsc_device *nvdev;
900 struct rndis_device *rdev;
901 int ret;
902
903 nvdev = rndis_filter_device_add(hdev, dev_info);
904 if (IS_ERR(nvdev))
905 return PTR_ERR(nvdev);
906
907 /* Note: enable and attach happen when sub-channels setup */
908
909 netif_carrier_off(ndev);
910
911 if (netif_running(ndev)) {
912 ret = rndis_filter_open(nvdev);
913 if (ret)
914 return ret;
915
916 rdev = nvdev->extension;
917 if (!rdev->link_state)
918 netif_carrier_on(ndev);
919 }
920
921 return 0;
922}
923
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700924static int netvsc_set_channels(struct net_device *net,
925 struct ethtool_channels *channels)
926{
927 struct net_device_context *net_device_ctx = netdev_priv(net);
stephen hemminger545a8e72017-03-22 14:51:00 -0700928 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
stephen hemminger7ca45932017-07-24 10:57:28 -0700929 unsigned int orig, count = channels->combined_count;
930 struct netvsc_device_info device_info;
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700931 int ret;
stephen hemminger2b018882017-01-24 13:06:03 -0800932
933 /* We do not support separate count for rx, tx, or other */
934 if (count == 0 ||
935 channels->rx_count || channels->tx_count || channels->other_count)
936 return -EINVAL;
937
stephen hemmingera0be4502017-03-22 14:51:01 -0700938 if (!nvdev || nvdev->destroy)
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700939 return -ENODEV;
940
stephen hemminger2b018882017-01-24 13:06:03 -0800941 if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700942 return -EINVAL;
943
stephen hemminger2b018882017-01-24 13:06:03 -0800944 if (count > nvdev->max_chn)
945 return -EINVAL;
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700946
stephen hemminger7ca45932017-07-24 10:57:28 -0700947 orig = nvdev->num_chn;
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700948
stephen hemminger7ca45932017-07-24 10:57:28 -0700949 memset(&device_info, 0, sizeof(device_info));
950 device_info.num_chn = count;
stephen hemminger8b532792017-08-09 17:46:11 -0700951 device_info.send_sections = nvdev->send_section_cnt;
Alex Ng0ab09be2017-09-20 11:17:35 -0700952 device_info.send_section_size = nvdev->send_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -0700953 device_info.recv_sections = nvdev->recv_section_cnt;
Alex Ng0ab09be2017-09-20 11:17:35 -0700954 device_info.recv_section_size = nvdev->recv_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -0700955
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700956 ret = netvsc_detach(net, nvdev);
957 if (ret)
958 return ret;
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700959
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700960 ret = netvsc_attach(net, &device_info);
961 if (ret) {
stephen hemminger7ca45932017-07-24 10:57:28 -0700962 device_info.num_chn = orig;
Stephen Hemminger7b2ee502018-03-20 15:03:05 -0700963 if (netvsc_attach(net, &device_info))
964 netdev_err(net, "restoring channel setting failed\n");
stephen hemminger7ca45932017-07-24 10:57:28 -0700965 }
966
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700967 return ret;
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -0700968}
969
Philippe Reynes5e8456f2017-03-08 23:41:04 +0100970static bool
971netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800972{
Philippe Reynes5e8456f2017-03-08 23:41:04 +0100973 struct ethtool_link_ksettings diff1 = *cmd;
974 struct ethtool_link_ksettings diff2 = {};
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800975
Philippe Reynes5e8456f2017-03-08 23:41:04 +0100976 diff1.base.speed = 0;
977 diff1.base.duplex = 0;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800978 /* advertising and cmd are usually set */
Philippe Reynes5e8456f2017-03-08 23:41:04 +0100979 ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
980 diff1.base.cmd = 0;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800981 /* We set port to PORT_OTHER */
Philippe Reynes5e8456f2017-03-08 23:41:04 +0100982 diff2.base.port = PORT_OTHER;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800983
984 return !memcmp(&diff1, &diff2, sizeof(diff1));
985}
986
987static void netvsc_init_settings(struct net_device *dev)
988{
989 struct net_device_context *ndc = netdev_priv(dev);
990
Haiyang Zhang486e3982017-10-06 08:33:57 -0700991 ndc->l4_hash = HV_DEFAULT_L4HASH;
Haiyang Zhang4823eb22017-08-21 19:22:39 -0700992
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800993 ndc->speed = SPEED_UNKNOWN;
Simon Xiaof3c9d40e2017-04-14 14:42:58 -0700994 ndc->duplex = DUPLEX_FULL;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800995}
996
Philippe Reynes5e8456f2017-03-08 23:41:04 +0100997static int netvsc_get_link_ksettings(struct net_device *dev,
998 struct ethtool_link_ksettings *cmd)
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -0800999{
1000 struct net_device_context *ndc = netdev_priv(dev);
1001
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001002 cmd->base.speed = ndc->speed;
1003 cmd->base.duplex = ndc->duplex;
1004 cmd->base.port = PORT_OTHER;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -08001005
1006 return 0;
1007}
1008
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001009static int netvsc_set_link_ksettings(struct net_device *dev,
1010 const struct ethtool_link_ksettings *cmd)
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -08001011{
1012 struct net_device_context *ndc = netdev_priv(dev);
1013 u32 speed;
1014
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001015 speed = cmd->base.speed;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -08001016 if (!ethtool_validate_speed(speed) ||
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001017 !ethtool_validate_duplex(cmd->base.duplex) ||
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -08001018 !netvsc_validate_ethtool_ss_cmd(cmd))
1019 return -EINVAL;
1020
1021 ndc->speed = speed;
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001022 ndc->duplex = cmd->base.duplex;
sixiao@microsoft.com49eb9382016-02-25 15:24:08 -08001023
1024 return 0;
1025}
1026
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001027static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1028{
1029 struct net_device_context *ndevctx = netdev_priv(ndev);
stephen hemminger0c195562017-08-01 19:58:53 -07001030 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001031 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
stephen hemminger9749fed2017-07-19 11:53:16 -07001032 int orig_mtu = ndev->mtu;
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001033 struct netvsc_device_info device_info;
stephen hemminger9749fed2017-07-19 11:53:16 -07001034 int ret = 0;
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001035
stephen hemmingera0be4502017-03-22 14:51:01 -07001036 if (!nvdev || nvdev->destroy)
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001037 return -ENODEV;
1038
stephen hemminger0c195562017-08-01 19:58:53 -07001039 /* Change MTU of underlying VF netdev first. */
1040 if (vf_netdev) {
1041 ret = dev_set_mtu(vf_netdev, mtu);
1042 if (ret)
1043 return ret;
1044 }
1045
Andrew Schwartzmeyer8ebdcc52015-08-11 17:14:31 -07001046 memset(&device_info, 0, sizeof(device_info));
stephen hemminger2b018882017-01-24 13:06:03 -08001047 device_info.num_chn = nvdev->num_chn;
stephen hemminger8b532792017-08-09 17:46:11 -07001048 device_info.send_sections = nvdev->send_section_cnt;
Alex Ng0ab09be2017-09-20 11:17:35 -07001049 device_info.send_section_size = nvdev->send_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001050 device_info.recv_sections = nvdev->recv_section_cnt;
Alex Ng0ab09be2017-09-20 11:17:35 -07001051 device_info.recv_section_size = nvdev->recv_section_size;
Dexuan Cui152669b2017-03-02 13:00:53 +00001052
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001053 ret = netvsc_detach(ndev, nvdev);
1054 if (ret)
1055 goto rollback_vf;
Dexuan Cui152669b2017-03-02 13:00:53 +00001056
Dexuan Cui152669b2017-03-02 13:00:53 +00001057 ndev->mtu = mtu;
1058
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001059 ret = netvsc_attach(ndev, &device_info);
1060 if (ret)
1061 goto rollback;
stephen hemminger9749fed2017-07-19 11:53:16 -07001062
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001063 return 0;
stephen hemminger0c195562017-08-01 19:58:53 -07001064
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001065rollback:
1066 /* Attempt rollback to original MTU */
1067 ndev->mtu = orig_mtu;
stephen hemminger68d715f2017-08-09 17:46:06 -07001068
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001069 if (netvsc_attach(ndev, &device_info))
1070 netdev_err(ndev, "restoring mtu failed\n");
1071rollback_vf:
1072 if (vf_netdev)
1073 dev_set_mtu(vf_netdev, orig_mtu);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001074
stephen hemminger9749fed2017-07-19 11:53:16 -07001075 return ret;
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001076}
1077
stephen hemminger0c195562017-08-01 19:58:53 -07001078static void netvsc_get_vf_stats(struct net_device *net,
1079 struct netvsc_vf_pcpu_stats *tot)
1080{
1081 struct net_device_context *ndev_ctx = netdev_priv(net);
1082 int i;
1083
1084 memset(tot, 0, sizeof(*tot));
1085
1086 for_each_possible_cpu(i) {
1087 const struct netvsc_vf_pcpu_stats *stats
1088 = per_cpu_ptr(ndev_ctx->vf_stats, i);
1089 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1090 unsigned int start;
1091
1092 do {
1093 start = u64_stats_fetch_begin_irq(&stats->syncp);
1094 rx_packets = stats->rx_packets;
1095 tx_packets = stats->tx_packets;
1096 rx_bytes = stats->rx_bytes;
1097 tx_bytes = stats->tx_bytes;
1098 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1099
1100 tot->rx_packets += rx_packets;
1101 tot->tx_packets += tx_packets;
1102 tot->rx_bytes += rx_bytes;
1103 tot->tx_bytes += tx_bytes;
1104 tot->tx_dropped += stats->tx_dropped;
1105 }
1106}
1107
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001108static void netvsc_get_stats64(struct net_device *net,
1109 struct rtnl_link_stats64 *t)
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001110{
1111 struct net_device_context *ndev_ctx = netdev_priv(net);
stephen hemminger776e7262017-04-14 14:42:57 -07001112 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
stephen hemminger0c195562017-08-01 19:58:53 -07001113 struct netvsc_vf_pcpu_stats vf_tot;
stephen hemminger89bb42b2017-08-09 17:46:08 -07001114 int i;
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001115
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001116 if (!nvdev)
1117 return;
1118
stephen hemminger0c195562017-08-01 19:58:53 -07001119 netdev_stats_to_stats64(t, &net->stats);
1120
1121 netvsc_get_vf_stats(net, &vf_tot);
1122 t->rx_packets += vf_tot.rx_packets;
1123 t->tx_packets += vf_tot.tx_packets;
1124 t->rx_bytes += vf_tot.rx_bytes;
1125 t->tx_bytes += vf_tot.tx_bytes;
1126 t->tx_dropped += vf_tot.tx_dropped;
1127
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001128 for (i = 0; i < nvdev->num_chn; i++) {
1129 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1130 const struct netvsc_stats *stats;
1131 u64 packets, bytes, multicast;
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001132 unsigned int start;
1133
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001134 stats = &nvchan->tx_stats;
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001135 do {
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001136 start = u64_stats_fetch_begin_irq(&stats->syncp);
1137 packets = stats->packets;
1138 bytes = stats->bytes;
1139 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001140
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001141 t->tx_bytes += bytes;
1142 t->tx_packets += packets;
1143
1144 stats = &nvchan->rx_stats;
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001145 do {
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001146 start = u64_stats_fetch_begin_irq(&stats->syncp);
1147 packets = stats->packets;
1148 bytes = stats->bytes;
1149 multicast = stats->multicast + stats->broadcast;
1150 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001151
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001152 t->rx_bytes += bytes;
1153 t->rx_packets += packets;
1154 t->multicast += multicast;
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001155 }
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001156}
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001157
1158static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
1159{
stephen hemminger867047c2017-07-28 08:59:42 -07001160 struct net_device_context *ndc = netdev_priv(ndev);
stephen hemminger16ba3262017-08-09 17:46:05 -07001161 struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
stephen hemminger867047c2017-07-28 08:59:42 -07001162 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001163 struct sockaddr *addr = p;
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001164 int err;
1165
stephen hemminger16ba3262017-08-09 17:46:05 -07001166 err = eth_prepare_mac_addr_change(ndev, p);
1167 if (err)
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001168 return err;
1169
stephen hemminger867047c2017-07-28 08:59:42 -07001170 if (!nvdev)
1171 return -ENODEV;
1172
stephen hemminger16ba3262017-08-09 17:46:05 -07001173 if (vf_netdev) {
1174 err = dev_set_mac_address(vf_netdev, addr);
1175 if (err)
1176 return err;
1177 }
1178
stephen hemminger867047c2017-07-28 08:59:42 -07001179 err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
stephen hemminger16ba3262017-08-09 17:46:05 -07001180 if (!err) {
1181 eth_commit_mac_addr_change(ndev, p);
1182 } else if (vf_netdev) {
1183 /* rollback change on VF */
1184 memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
1185 dev_set_mac_address(vf_netdev, addr);
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001186 }
1187
1188 return err;
1189}
1190
Stephen Hemminger4323b472016-08-23 12:17:57 -07001191static const struct {
1192 char name[ETH_GSTRING_LEN];
1193 u16 offset;
1194} netvsc_stats[] = {
1195 { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
Stephen Hemmingerf61a9d62017-12-12 16:48:36 -08001196 { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
Stephen Hemminger4323b472016-08-23 12:17:57 -07001197 { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
1198 { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
1199 { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
stephen hemmingercad5c192017-08-09 17:46:12 -07001200 { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
1201 { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
Stephen Hemmingerf61a9d62017-12-12 16:48:36 -08001202 { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
Simon Xiao09af87d2017-09-29 11:39:46 -07001203 { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
1204 { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
stephen hemminger0c195562017-08-01 19:58:53 -07001205}, vf_stats[] = {
1206 { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
1207 { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
1208 { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
1209 { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
1210 { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
Stephen Hemminger4323b472016-08-23 12:17:57 -07001211};
1212
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001213#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
stephen hemminger0c195562017-08-01 19:58:53 -07001214#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001215
1216/* 4 statistics per queue (rx/tx packets/bytes) */
1217#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
1218
Stephen Hemminger4323b472016-08-23 12:17:57 -07001219static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1220{
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001221 struct net_device_context *ndc = netdev_priv(dev);
stephen hemmingerfbd4c7e2017-06-07 15:53:47 -07001222 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001223
1224 if (!nvdev)
1225 return -ENODEV;
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001226
Stephen Hemminger4323b472016-08-23 12:17:57 -07001227 switch (string_set) {
1228 case ETH_SS_STATS:
stephen hemminger0c195562017-08-01 19:58:53 -07001229 return NETVSC_GLOBAL_STATS_LEN
1230 + NETVSC_VF_STATS_LEN
1231 + NETVSC_QUEUE_STATS_LEN(nvdev);
Stephen Hemminger4323b472016-08-23 12:17:57 -07001232 default:
1233 return -EINVAL;
1234 }
1235}
1236
1237static void netvsc_get_ethtool_stats(struct net_device *dev,
1238 struct ethtool_stats *stats, u64 *data)
1239{
1240 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001241 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
Stephen Hemminger4323b472016-08-23 12:17:57 -07001242 const void *nds = &ndc->eth_stats;
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001243 const struct netvsc_stats *qstats;
stephen hemminger0c195562017-08-01 19:58:53 -07001244 struct netvsc_vf_pcpu_stats sum;
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001245 unsigned int start;
1246 u64 packets, bytes;
1247 int i, j;
Stephen Hemminger4323b472016-08-23 12:17:57 -07001248
stephen hemminger545a8e72017-03-22 14:51:00 -07001249 if (!nvdev)
1250 return;
1251
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001252 for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
Stephen Hemminger4323b472016-08-23 12:17:57 -07001253 data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001254
stephen hemminger0c195562017-08-01 19:58:53 -07001255 netvsc_get_vf_stats(dev, &sum);
1256 for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
1257 data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
1258
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001259 for (j = 0; j < nvdev->num_chn; j++) {
1260 qstats = &nvdev->chan_table[j].tx_stats;
1261
1262 do {
1263 start = u64_stats_fetch_begin_irq(&qstats->syncp);
1264 packets = qstats->packets;
1265 bytes = qstats->bytes;
1266 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1267 data[i++] = packets;
1268 data[i++] = bytes;
1269
1270 qstats = &nvdev->chan_table[j].rx_stats;
1271 do {
1272 start = u64_stats_fetch_begin_irq(&qstats->syncp);
1273 packets = qstats->packets;
1274 bytes = qstats->bytes;
1275 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1276 data[i++] = packets;
1277 data[i++] = bytes;
1278 }
Stephen Hemminger4323b472016-08-23 12:17:57 -07001279}
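/* A minimal sketch of the u64_stats seqcount pattern used above: readers
 * retry whenever a writer updated the counters mid-read, keeping 64-bit
 * counters consistent on 32-bit SMP. The demo_* names are illustrative
 * only and the block is compiled out with #if 0.
 */
#if 0
struct demo_counters {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_read(const struct demo_counters *c, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&c->syncp);
		*packets = c->packets;
		*bytes = c->bytes;
	} while (u64_stats_fetch_retry_irq(&c->syncp, start));
}
#endif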
1280
1281static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1282{
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001283 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001284 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001285 u8 *p = data;
Stephen Hemminger4323b472016-08-23 12:17:57 -07001286 int i;
1287
stephen hemminger545a8e72017-03-22 14:51:00 -07001288 if (!nvdev)
1289 return;
1290
Stephen Hemminger4323b472016-08-23 12:17:57 -07001291 switch (stringset) {
1292 case ETH_SS_STATS:
stephen hemminger0c195562017-08-01 19:58:53 -07001293 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
1294 memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
1295 p += ETH_GSTRING_LEN;
1296 }
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001297
stephen hemminger0c195562017-08-01 19:58:53 -07001298 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
1299 memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
1300 p += ETH_GSTRING_LEN;
1301 }
1302
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001303 for (i = 0; i < nvdev->num_chn; i++) {
1304 sprintf(p, "tx_queue_%u_packets", i);
1305 p += ETH_GSTRING_LEN;
1306 sprintf(p, "tx_queue_%u_bytes", i);
1307 p += ETH_GSTRING_LEN;
1308 sprintf(p, "rx_queue_%u_packets", i);
1309 p += ETH_GSTRING_LEN;
1310 sprintf(p, "rx_queue_%u_bytes", i);
1311 p += ETH_GSTRING_LEN;
1312 }
1313
Stephen Hemminger4323b472016-08-23 12:17:57 -07001314 break;
1315 }
1316}
1317
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001318static int
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001319netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1320 struct ethtool_rxnfc *info)
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001321{
Haiyang Zhang486e3982017-10-06 08:33:57 -07001322 const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1323
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001324 info->data = RXH_IP_SRC | RXH_IP_DST;
1325
1326 switch (info->flow_type) {
1327 case TCP_V4_FLOW:
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001328 if (ndc->l4_hash & HV_TCP4_L4HASH)
1329 info->data |= l4_flag;
1330
1331 break;
1332
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001333 case TCP_V6_FLOW:
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001334 if (ndc->l4_hash & HV_TCP6_L4HASH)
1335 info->data |= l4_flag;
1336
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001337 break;
1338
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001339 case UDP_V4_FLOW:
Haiyang Zhang486e3982017-10-06 08:33:57 -07001340 if (ndc->l4_hash & HV_UDP4_L4HASH)
1341 info->data |= l4_flag;
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001342
1343 break;
1344
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001345 case UDP_V6_FLOW:
Haiyang Zhang486e3982017-10-06 08:33:57 -07001346 if (ndc->l4_hash & HV_UDP6_L4HASH)
1347 info->data |= l4_flag;
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001348
1349 break;
1350
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001351 case IPV4_FLOW:
1352 case IPV6_FLOW:
1353 break;
1354 default:
1355 info->data = 0;
1356 break;
1357 }
1358
1359 return 0;
1360}
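/* A hedged userspace sketch of how the ETHTOOL_GRXFH path above is
 * exercised: ethtool issues SIOCETHTOOL with an ethtool_rxnfc request and
 * the kernel fills in the RXH_* bits. demo_get_rxfh() is illustrative,
 * not part of any real tool, and the block is compiled out with #if 0.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int demo_get_rxfh(const char *ifname, __u64 *flags)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	if (ret < 0)
		return -1;

	*flags = nfc.data;	/* e.g. RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 */
	return 0;
}
#endif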
1361
1362static int
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001363netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1364 u32 *rules)
1365{
1366 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001367 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001368
1369 if (!nvdev)
1370 return -ENODEV;
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001371
1372 switch (info->cmd) {
1373 case ETHTOOL_GRXRINGS:
1374 info->data = nvdev->num_chn;
1375 return 0;
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001376
1377 case ETHTOOL_GRXFH:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001378 return netvsc_get_rss_hash_opts(ndc, info);
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001379 }
1380 return -EOPNOTSUPP;
1381}
1382
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001383static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1384 struct ethtool_rxnfc *info)
1385{
1386 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1387 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
Haiyang Zhang486e3982017-10-06 08:33:57 -07001388 switch (info->flow_type) {
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001389 case TCP_V4_FLOW:
1390 ndc->l4_hash |= HV_TCP4_L4HASH;
1391 break;
1392
1393 case TCP_V6_FLOW:
1394 ndc->l4_hash |= HV_TCP6_L4HASH;
1395 break;
1396
Haiyang Zhang486e3982017-10-06 08:33:57 -07001397 case UDP_V4_FLOW:
1398 ndc->l4_hash |= HV_UDP4_L4HASH;
1399 break;
1400
1401 case UDP_V6_FLOW:
1402 ndc->l4_hash |= HV_UDP6_L4HASH;
1403 break;
1404
1405 default:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001406 return -EOPNOTSUPP;
Haiyang Zhang486e3982017-10-06 08:33:57 -07001407 }
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001408
1409 return 0;
1410 }
1411
1412 if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
Haiyang Zhang486e3982017-10-06 08:33:57 -07001413 switch (info->flow_type) {
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001414 case TCP_V4_FLOW:
1415 ndc->l4_hash &= ~HV_TCP4_L4HASH;
1416 break;
1417
1418 case TCP_V6_FLOW:
1419 ndc->l4_hash &= ~HV_TCP6_L4HASH;
1420 break;
1421
Haiyang Zhang486e3982017-10-06 08:33:57 -07001422 case UDP_V4_FLOW:
1423 ndc->l4_hash &= ~HV_UDP4_L4HASH;
1424 break;
1425
1426 case UDP_V6_FLOW:
1427 ndc->l4_hash &= ~HV_UDP6_L4HASH;
1428 break;
1429
1430 default:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001431 return -EOPNOTSUPP;
Haiyang Zhang486e3982017-10-06 08:33:57 -07001432 }
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001433
1434 return 0;
1435 }
1436
1437 return -EOPNOTSUPP;
1438}
1439
1440static int
1441netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
1442{
1443 struct net_device_context *ndc = netdev_priv(ndev);
1444
1445 if (info->cmd == ETHTOOL_SRXFH)
1446 return netvsc_set_rss_hash_opts(ndc, info);
1447
1448 return -EOPNOTSUPP;
1449}
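/* The get/set paths above correspond to the ethtool CLI, e.g.:
 *   ethtool -n eth0 rx-flow-hash udp4        (query)
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   (4-tuple: IPs + ports)
 *   ethtool -N eth0 rx-flow-hash udp4 sd     (2-tuple: IPs only)
 */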
1450
Richard Weinberger316158f2014-07-09 16:23:59 +02001451#ifdef CONFIG_NET_POLL_CONTROLLER
stephen hemmingera5ecd432017-06-07 15:53:48 -07001452static void netvsc_poll_controller(struct net_device *dev)
Richard Weinberger316158f2014-07-09 16:23:59 +02001453{
stephen hemmingera5ecd432017-06-07 15:53:48 -07001454 struct net_device_context *ndc = netdev_priv(dev);
1455 struct netvsc_device *ndev;
1456 int i;
1457
1458 rcu_read_lock();
1459 ndev = rcu_dereference(ndc->nvdev);
1460 if (ndev) {
1461 for (i = 0; i < ndev->num_chn; i++) {
1462 struct netvsc_channel *nvchan = &ndev->chan_table[i];
1463
1464 napi_schedule(&nvchan->napi);
1465 }
1466 }
1467 rcu_read_unlock();
Richard Weinberger316158f2014-07-09 16:23:59 +02001468}
1469#endif
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001470
stephen hemminger962f3fe2017-01-24 13:06:02 -08001471static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
1472{
1473 return NETVSC_HASH_KEYLEN;
1474}
1475
1476static u32 netvsc_rss_indir_size(struct net_device *dev)
1477{
stephen hemmingerff4a4412017-01-24 13:06:04 -08001478 return ITAB_NUM;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001479}
1480
1481static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1482 u8 *hfunc)
1483{
1484 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001485 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001486 struct rndis_device *rndis_dev;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001487 int i;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001488
stephen hemminger545a8e72017-03-22 14:51:00 -07001489 if (!ndev)
1490 return -ENODEV;
1491
stephen hemminger962f3fe2017-01-24 13:06:02 -08001492 if (hfunc)
1493 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
1494
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001495 rndis_dev = ndev->extension;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001496 if (indir) {
1497 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhang473713002017-10-13 12:28:03 -07001498 indir[i] = rndis_dev->rx_table[i];
stephen hemmingerff4a4412017-01-24 13:06:04 -08001499 }
1500
stephen hemminger962f3fe2017-01-24 13:06:02 -08001501 if (key)
1502 memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
1503
1504 return 0;
1505}
1506
1507static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1508 const u8 *key, const u8 hfunc)
1509{
1510 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001511 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001512 struct rndis_device *rndis_dev;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001513 int i;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001514
stephen hemminger545a8e72017-03-22 14:51:00 -07001515 if (!ndev)
1516 return -ENODEV;
1517
stephen hemminger962f3fe2017-01-24 13:06:02 -08001518 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1519 return -EOPNOTSUPP;
1520
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001521 rndis_dev = ndev->extension;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001522 if (indir) {
1523 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhangdb3cd7a2017-09-01 14:30:07 -07001524 if (indir[i] >= ndev->num_chn)
stephen hemmingerff4a4412017-01-24 13:06:04 -08001525 return -EINVAL;
1526
1527 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhang473713002017-10-13 12:28:03 -07001528 rndis_dev->rx_table[i] = indir[i];
stephen hemmingerff4a4412017-01-24 13:06:04 -08001529 }
1530
1531 if (!key) {
1532 if (!indir)
1533 return 0;
1534
1535 key = rndis_dev->rss_key;
1536 }
stephen hemminger962f3fe2017-01-24 13:06:02 -08001537
Haiyang Zhang715e2ec2017-09-01 14:30:04 -07001538 return rndis_filter_set_rss_param(rndis_dev, key);
stephen hemminger962f3fe2017-01-24 13:06:02 -08001539}
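/* The Toeplitz key and indirection table above map to the ethtool CLI:
 *   ethtool -x eth0              (show the 40-byte key and the table)
 *   ethtool -X eth0 equal 4      (spread flows over the first 4 channels)
 * Indirection entries naming a nonexistent channel are rejected with
 * -EINVAL above.
 */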
1540
stephen hemminger8b532792017-08-09 17:46:11 -07001541/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
 1542 * It does have a pre-allocated receive area which is divided into sections.
 1543 */
1544static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1545 struct ethtool_ringparam *ring)
1546{
1547 u32 max_buf_size;
1548
1549 ring->rx_pending = nvdev->recv_section_cnt;
1550 ring->tx_pending = nvdev->send_section_cnt;
1551
1552 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1553 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
1554 else
1555 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
1556
1557 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1558 ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
1559 / nvdev->send_section_size;
1560}
1561
1562static void netvsc_get_ringparam(struct net_device *ndev,
1563 struct ethtool_ringparam *ring)
1564{
1565 struct net_device_context *ndevctx = netdev_priv(ndev);
1566 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1567
1568 if (!nvdev)
1569 return;
1570
1571 __netvsc_get_ringparam(nvdev, ring);
1572}
1573
1574static int netvsc_set_ringparam(struct net_device *ndev,
1575 struct ethtool_ringparam *ring)
1576{
1577 struct net_device_context *ndevctx = netdev_priv(ndev);
1578 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
stephen hemminger8b532792017-08-09 17:46:11 -07001579 struct netvsc_device_info device_info;
1580 struct ethtool_ringparam orig;
1581 u32 new_tx, new_rx;
stephen hemminger8b532792017-08-09 17:46:11 -07001582 int ret = 0;
1583
1584 if (!nvdev || nvdev->destroy)
1585 return -ENODEV;
1586
1587 memset(&orig, 0, sizeof(orig));
1588 __netvsc_get_ringparam(nvdev, &orig);
1589
1590 new_tx = clamp_t(u32, ring->tx_pending,
1591 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
1592 new_rx = clamp_t(u32, ring->rx_pending,
1593 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
1594
1595 if (new_tx == orig.tx_pending &&
1596 new_rx == orig.rx_pending)
1597 return 0; /* no change */
1598
1599 memset(&device_info, 0, sizeof(device_info));
1600 device_info.num_chn = nvdev->num_chn;
stephen hemminger8b532792017-08-09 17:46:11 -07001601 device_info.send_sections = new_tx;
Alex Ng0ab09be2017-09-20 11:17:35 -07001602 device_info.send_section_size = nvdev->send_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001603 device_info.recv_sections = new_rx;
Alex Ng0ab09be2017-09-20 11:17:35 -07001604 device_info.recv_section_size = nvdev->recv_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001605
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001606 ret = netvsc_detach(ndev, nvdev);
1607 if (ret)
1608 return ret;
stephen hemminger8b532792017-08-09 17:46:11 -07001609
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001610 ret = netvsc_attach(ndev, &device_info);
1611 if (ret) {
stephen hemminger8b532792017-08-09 17:46:11 -07001612 device_info.send_sections = orig.tx_pending;
1613 device_info.recv_sections = orig.rx_pending;
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001614
1615 if (netvsc_attach(ndev, &device_info))
 1616			netdev_err(ndev, "restoring ringparam failed\n");
stephen hemminger8b532792017-08-09 17:46:11 -07001617 }
1618
stephen hemminger8b532792017-08-09 17:46:11 -07001619 return ret;
1620}
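/* "ethtool -g eth0" reports the section counts above; "ethtool -G eth0
 * rx <n> tx <n>" resizes them. Because the host-allocated buffers cannot
 * be resized in place, the device is detached and re-attached, which
 * briefly interrupts traffic; requested values are first clamped to the
 * NETVSC_MIN_*_SECTIONS..*_max_pending range.
 */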
1621
Haiyang Zhang273de022018-05-22 11:29:34 -07001622static u32 netvsc_get_msglevel(struct net_device *ndev)
1623{
1624 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1625
1626 return ndev_ctx->msg_enable;
1627}
1628
1629static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
1630{
1631 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1632
1633 ndev_ctx->msg_enable = val;
1634}
1635
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001636static const struct ethtool_ops ethtool_ops = {
1637 .get_drvinfo = netvsc_get_drvinfo,
Haiyang Zhang273de022018-05-22 11:29:34 -07001638 .get_msglevel = netvsc_get_msglevel,
1639 .set_msglevel = netvsc_set_msglevel,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001640 .get_link = ethtool_op_get_link,
Stephen Hemminger4323b472016-08-23 12:17:57 -07001641 .get_ethtool_stats = netvsc_get_ethtool_stats,
1642 .get_sset_count = netvsc_get_sset_count,
1643 .get_strings = netvsc_get_strings,
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -08001644 .get_channels = netvsc_get_channels,
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -07001645 .set_channels = netvsc_set_channels,
sixiao@microsoft.com76d13b52016-02-17 16:43:59 -08001646 .get_ts_info = ethtool_op_get_ts_info,
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001647 .get_rxnfc = netvsc_get_rxnfc,
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001648 .set_rxnfc = netvsc_set_rxnfc,
stephen hemminger962f3fe2017-01-24 13:06:02 -08001649 .get_rxfh_key_size = netvsc_get_rxfh_key_size,
1650 .get_rxfh_indir_size = netvsc_rss_indir_size,
1651 .get_rxfh = netvsc_get_rxfh,
1652 .set_rxfh = netvsc_set_rxfh,
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001653 .get_link_ksettings = netvsc_get_link_ksettings,
1654 .set_link_ksettings = netvsc_set_link_ksettings,
stephen hemminger8b532792017-08-09 17:46:11 -07001655 .get_ringparam = netvsc_get_ringparam,
1656 .set_ringparam = netvsc_set_ringparam,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001657};
1658
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001659static const struct net_device_ops device_ops = {
1660 .ndo_open = netvsc_open,
1661 .ndo_stop = netvsc_close,
1662 .ndo_start_xmit = netvsc_start_xmit,
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001663 .ndo_change_rx_flags = netvsc_change_rx_flags,
1664 .ndo_set_rx_mode = netvsc_set_rx_mode,
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001665 .ndo_change_mtu = netvsc_change_mtu,
Haiyang Zhangb681b582010-08-03 19:15:31 +00001666 .ndo_validate_addr = eth_validate_addr,
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001667 .ndo_set_mac_address = netvsc_set_mac_addr,
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001668 .ndo_select_queue = netvsc_select_queue,
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001669 .ndo_get_stats64 = netvsc_get_stats64,
Richard Weinberger316158f2014-07-09 16:23:59 +02001670#ifdef CONFIG_NET_POLL_CONTROLLER
1671 .ndo_poll_controller = netvsc_poll_controller,
1672#endif
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001673};
1674
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001675/*
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001676 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 1677 * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when the carrier is
 1678 * already present, send a GARP packet to network peers with netdev_notify_peers().
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001679 */
Haiyang Zhang891de742014-02-12 16:54:27 -08001680static void netvsc_link_change(struct work_struct *w)
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001681{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001682 struct net_device_context *ndev_ctx =
1683 container_of(w, struct net_device_context, dwork.work);
1684 struct hv_device *device_obj = ndev_ctx->device_ctx;
1685 struct net_device *net = hv_get_drvdata(device_obj);
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001686 struct netvsc_device *net_device;
Haiyang Zhang891de742014-02-12 16:54:27 -08001687 struct rndis_device *rdev;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001688 struct netvsc_reconfig *event = NULL;
1689 bool notify = false, reschedule = false;
1690 unsigned long flags, next_reconfig, delay;
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001691
stephen hemminger9b4e9462017-08-24 16:49:16 -07001692	/* if changes are happening, come back later */
1693 if (!rtnl_trylock()) {
1694 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1695 return;
1696 }
1697
stephen hemmingera0be4502017-03-22 14:51:01 -07001698 net_device = rtnl_dereference(ndev_ctx->nvdev);
1699 if (!net_device)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001700 goto out_unlock;
1701
Haiyang Zhang891de742014-02-12 16:54:27 -08001702 rdev = net_device->extension;
Haiyang Zhang891de742014-02-12 16:54:27 -08001703
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001704 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1705 if (time_is_after_jiffies(next_reconfig)) {
 1706		/* link_watch only sends one notification with the current state
 1707		 * per second; avoid doing reconfig more frequently. Handle
 1708		 * jiffies wrap-around.
1709 */
1710 delay = next_reconfig - jiffies;
1711 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1712 schedule_delayed_work(&ndev_ctx->dwork, delay);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001713 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001714 }
1715 ndev_ctx->last_reconfig = jiffies;
1716
1717 spin_lock_irqsave(&ndev_ctx->lock, flags);
1718 if (!list_empty(&ndev_ctx->reconfig_events)) {
1719 event = list_first_entry(&ndev_ctx->reconfig_events,
1720 struct netvsc_reconfig, list);
1721 list_del(&event->list);
1722 reschedule = !list_empty(&ndev_ctx->reconfig_events);
1723 }
1724 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1725
1726 if (!event)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001727 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001728
1729 switch (event->event) {
1730 /* Only the following events are possible due to the check in
1731 * netvsc_linkstatus_callback()
1732 */
1733 case RNDIS_STATUS_MEDIA_CONNECT:
1734 if (rdev->link_state) {
1735 rdev->link_state = false;
stephen hemminger0c195562017-08-01 19:58:53 -07001736 netif_carrier_on(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001737 netif_tx_wake_all_queues(net);
1738 } else {
1739 notify = true;
Haiyang Zhang3a494e72014-06-19 18:34:36 -07001740 }
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001741 kfree(event);
1742 break;
1743 case RNDIS_STATUS_MEDIA_DISCONNECT:
1744 if (!rdev->link_state) {
1745 rdev->link_state = true;
1746 netif_carrier_off(net);
1747 netif_tx_stop_all_queues(net);
1748 }
1749 kfree(event);
1750 break;
1751 case RNDIS_STATUS_NETWORK_CHANGE:
1752 /* Only makes sense if carrier is present */
1753 if (!rdev->link_state) {
1754 rdev->link_state = true;
1755 netif_carrier_off(net);
1756 netif_tx_stop_all_queues(net);
1757 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1758 spin_lock_irqsave(&ndev_ctx->lock, flags);
Haiyang Zhang15cfd402016-04-21 16:13:01 -07001759 list_add(&event->list, &ndev_ctx->reconfig_events);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001760 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1761 reschedule = true;
1762 }
1763 break;
Haiyang Zhang891de742014-02-12 16:54:27 -08001764 }
1765
1766 rtnl_unlock();
1767
1768 if (notify)
1769 netdev_notify_peers(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001770
 1771	/* link_watch only sends one notification with the current state per
 1772	 * second; handle the next reconfig event in 2 seconds.
1773 */
1774 if (reschedule)
1775 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001776
1777 return;
1778
1779out_unlock:
1780 rtnl_unlock();
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001781}
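/* A minimal sketch of the rate-limiting pattern used by
 * netvsc_link_change(): if the previous run was less than LINKCHANGE_INT
 * ago, re-arm the delayed work for the remainder of the interval instead
 * of running now; time_is_after_jiffies() copes with jiffies wrap-around.
 * The demo_* names are illustrative and the block is compiled out.
 */
#if 0
struct demo_ctx {
	struct delayed_work dwork;
	unsigned long last_run;
};

static void demo_work(struct work_struct *w)
{
	struct demo_ctx *ctx = container_of(w, struct demo_ctx, dwork.work);
	unsigned long next_run = ctx->last_run + LINKCHANGE_INT;

	if (time_is_after_jiffies(next_run)) {
		unsigned long delay = min_t(unsigned long,
					    next_run - jiffies, LINKCHANGE_INT);

		schedule_delayed_work(&ctx->dwork, delay);
		return;
	}

	ctx->last_run = jiffies;
	/* ... dequeue and process one event, rescheduling if more remain ... */
}
#endif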
1782
stephen hemminger0c195562017-08-01 19:58:53 -07001783/* Called when the VF is injecting data into the network stack.
 1784 * Change the associated network device from VF to netvsc.
 1785 * note: already called with rcu_read_lock held
1786 */
1787static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
1788{
1789 struct sk_buff *skb = *pskb;
1790 struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
1791 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1792 struct netvsc_vf_pcpu_stats *pcpu_stats
1793 = this_cpu_ptr(ndev_ctx->vf_stats);
1794
1795 skb->dev = ndev;
1796
1797 u64_stats_update_begin(&pcpu_stats->syncp);
1798 pcpu_stats->rx_packets++;
1799 pcpu_stats->rx_bytes += skb->len;
1800 u64_stats_update_end(&pcpu_stats->syncp);
1801
1802 return RX_HANDLER_ANOTHER;
1803}
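/* Returning RX_HANDLER_ANOTHER after rewriting skb->dev makes the core
 * receive path run another round with the synthetic device as the ingress
 * device, so VF traffic is accounted and delivered as if it had arrived
 * on netvsc.
 */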
1804
stephen hemminger0c195562017-08-01 19:58:53 -07001805static void __netvsc_vf_setup(struct net_device *ndev,
1806 struct net_device *vf_netdev)
1807{
1808 int ret;
1809
stephen hemminger0c195562017-08-01 19:58:53 -07001810 /* Align MTU of VF with master */
1811 ret = dev_set_mtu(vf_netdev, ndev->mtu);
1812 if (ret)
1813 netdev_warn(vf_netdev,
1814 "unable to change mtu to %u\n", ndev->mtu);
1815
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001816	/* set multicast and other flags on the VF */
1817 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
Stephen Hemmingerb0dee792018-03-07 13:49:12 -08001818
1819 /* sync address list from ndev to VF */
1820 netif_addr_lock_bh(ndev);
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001821 dev_uc_sync(vf_netdev, ndev);
1822 dev_mc_sync(vf_netdev, ndev);
Stephen Hemmingerb0dee792018-03-07 13:49:12 -08001823 netif_addr_unlock_bh(ndev);
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001824
stephen hemminger0c195562017-08-01 19:58:53 -07001825 if (netif_running(ndev)) {
1826 ret = dev_open(vf_netdev);
1827 if (ret)
1828 netdev_warn(vf_netdev,
1829 "unable to open: %d\n", ret);
1830 }
1831}
1832
 1833/* Set up the VF as a slave of the synthetic device.
1834 * Runs in workqueue to avoid recursion in netlink callbacks.
1835 */
1836static void netvsc_vf_setup(struct work_struct *w)
1837{
1838 struct net_device_context *ndev_ctx
stephen hemminger6123c662017-08-09 17:46:03 -07001839 = container_of(w, struct net_device_context, vf_takeover.work);
stephen hemminger0c195562017-08-01 19:58:53 -07001840 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
1841 struct net_device *vf_netdev;
1842
stephen hemmingerfb84af82017-08-04 12:14:00 -07001843 if (!rtnl_trylock()) {
stephen hemminger6123c662017-08-09 17:46:03 -07001844 schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
stephen hemmingerfb84af82017-08-04 12:14:00 -07001845 return;
1846 }
1847
stephen hemminger0c195562017-08-01 19:58:53 -07001848 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
1849 if (vf_netdev)
1850 __netvsc_vf_setup(ndev, vf_netdev);
1851
1852 rtnl_unlock();
1853}
1854
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001855static int netvsc_pre_register_vf(struct net_device *vf_netdev,
1856 struct net_device *ndev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001857{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001858 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001859 struct netvsc_device *netvsc_dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001860
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001861 net_device_ctx = netdev_priv(ndev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001862 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001863 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001864 return -ENODEV;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001865
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001866 return 0;
1867}
stephen hemminger0c195562017-08-01 19:58:53 -07001868
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001869static int netvsc_register_vf(struct net_device *vf_netdev,
1870 struct net_device *ndev)
1871{
1872 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1873
1874 /* set slave flag before open to prevent IPv6 addrconf */
1875 vf_netdev->flags |= IFF_SLAVE;
1876
1877 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
1878
1879 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
1880
1881 netdev_info(vf_netdev, "joined to %s\n", ndev->name);
stephen hemminger0c195562017-08-01 19:58:53 -07001882
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001883 dev_hold(vf_netdev);
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001884 rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev);
1885
1886 return 0;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001887}
1888
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001889/* VF up/down change detected, switch the data path accordingly */
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001890static int netvsc_vf_changed(struct net_device *vf_netdev,
1891 struct net_device *ndev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001892{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001893 struct net_device_context *net_device_ctx;
stephen hemminger7b83f522017-08-07 11:30:00 -07001894 struct netvsc_device *netvsc_dev;
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001895 bool vf_is_up = netif_running(vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001896
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001897 net_device_ctx = netdev_priv(ndev);
stephen hemminger7b83f522017-08-07 11:30:00 -07001898 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
1899 if (!netvsc_dev)
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001900 return -ENODEV;
stephen hemminger7b83f522017-08-07 11:30:00 -07001901
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001902 netvsc_switch_datapath(ndev, vf_is_up);
1903 netdev_info(ndev, "Data path switched %s VF: %s\n",
1904 vf_is_up ? "to" : "from", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001905
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001906 return 0;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001907}
1908
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001909static int netvsc_pre_unregister_vf(struct net_device *vf_netdev,
1910 struct net_device *ndev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001911{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001912 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001913
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001914 net_device_ctx = netdev_priv(ndev);
stephen hemminger6123c662017-08-09 17:46:03 -07001915 cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001916
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001917 return 0;
1918}
1919
1920static int netvsc_unregister_vf(struct net_device *vf_netdev,
1921 struct net_device *ndev)
1922{
1923 struct net_device_context *net_device_ctx;
1924
1925 net_device_ctx = netdev_priv(ndev);
1926
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001927 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001928
1929 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001930 dev_put(vf_netdev);
Stephen Hemmingerec158f72017-08-31 16:16:13 -07001931
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001932 return 0;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001933}
1934
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07001935static struct failover_ops netvsc_failover_ops = {
1936 .slave_pre_register = netvsc_pre_register_vf,
1937 .slave_register = netvsc_register_vf,
1938 .slave_pre_unregister = netvsc_pre_unregister_vf,
1939 .slave_unregister = netvsc_unregister_vf,
1940 .slave_link_change = netvsc_vf_changed,
1941 .slave_handle_frame = netvsc_vf_handle_frame,
1942};
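/* The failover core (net/core/failover.c) pairs a VF netdev with this
 * synthetic device by matching MAC address, then invokes the ops above
 * from its own netdev notifier; no explicit notifier handling is needed
 * here.
 */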
1943
K. Y. Srinivasan84946892011-09-13 10:59:38 -07001944static int netvsc_probe(struct hv_device *dev,
1945 const struct hv_vmbus_device_id *dev_id)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001946{
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001947 struct net_device *net = NULL;
1948 struct net_device_context *net_device_ctx;
1949 struct netvsc_device_info device_info;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001950 struct netvsc_device *nvdev;
stephen hemminger0c195562017-08-01 19:58:53 -07001951 int ret = -ENOMEM;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001952
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001953 net = alloc_etherdev_mq(sizeof(struct net_device_context),
stephen hemminger2b018882017-01-24 13:06:03 -08001954 VRSS_CHANNEL_MAX);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001955 if (!net)
stephen hemminger0c195562017-08-01 19:58:53 -07001956 goto no_net;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001957
Haiyang Zhang1b07da52014-03-04 14:11:06 -08001958 netif_carrier_off(net);
1959
Haiyang Zhangb37879e2016-08-04 10:42:14 -07001960 netvsc_init_settings(net);
1961
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001962 net_device_ctx = netdev_priv(net);
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07001963 net_device_ctx->device_ctx = dev;
Simon Xiao3f300ff2015-04-28 01:05:17 -07001964 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
1965 if (netif_msg_probe(net_device_ctx))
1966 netdev_dbg(net, "netvsc msg_enable: %d\n",
1967 net_device_ctx->msg_enable);
1968
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001969 hv_set_drvdata(dev, net);
Vitaly Kuznetsovf580aec2016-05-13 13:55:20 +02001970
Haiyang Zhang891de742014-02-12 16:54:27 -08001971 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001972
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001973 spin_lock_init(&net_device_ctx->lock);
1974 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
stephen hemminger6123c662017-08-09 17:46:03 -07001975 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
stephen hemminger0c195562017-08-01 19:58:53 -07001976
1977 net_device_ctx->vf_stats
1978 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
1979 if (!net_device_ctx->vf_stats)
1980 goto no_stats;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001981
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001982 net->netdev_ops = &device_ops;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00001983 net->ethtool_ops = &ethtool_ops;
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07001984 SET_NETDEV_DEV(net, &dev->device);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001985
Vitaly Kuznetsov14a03cf2016-02-05 17:29:08 +01001986	/* We always need headroom for the rndis header */
1987 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1988
Haiyang Zhang6450f8f2017-09-22 15:31:38 -07001989	/* Initialize the number of queues to 1; we may change it if more
 1990	 * channels are offered later.
1991 */
1992 netif_set_real_num_tx_queues(net, 1);
1993 netif_set_real_num_rx_queues(net, 1);
1994
Haiyang Zhang692e0842011-09-01 12:19:43 -07001995 /* Notify the netvsc driver of the new device */
Andrew Schwartzmeyer8ebdcc52015-08-11 17:14:31 -07001996 memset(&device_info, 0, sizeof(device_info));
stephen hemminger3071ada2017-03-22 14:50:59 -07001997 device_info.num_chn = VRSS_CHANNEL_DEFAULT;
stephen hemminger8b532792017-08-09 17:46:11 -07001998 device_info.send_sections = NETVSC_DEFAULT_TX;
Alex Ng0ab09be2017-09-20 11:17:35 -07001999 device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
stephen hemminger8b532792017-08-09 17:46:11 -07002000 device_info.recv_sections = NETVSC_DEFAULT_RX;
Alex Ng0ab09be2017-09-20 11:17:35 -07002001 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
stephen hemminger9749fed2017-07-19 11:53:16 -07002002
2003 nvdev = rndis_filter_device_add(dev, &device_info);
2004 if (IS_ERR(nvdev)) {
2005 ret = PTR_ERR(nvdev);
Haiyang Zhang692e0842011-09-01 12:19:43 -07002006 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
stephen hemminger0c195562017-08-01 19:58:53 -07002007 goto rndis_failed;
Haiyang Zhang692e0842011-09-01 12:19:43 -07002008 }
stephen hemminger0c195562017-08-01 19:58:53 -07002009
Haiyang Zhang692e0842011-09-01 12:19:43 -07002010 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2011
Vitaly Kuznetsovaefd80e2017-11-15 15:12:55 +01002012 /* hw_features computed in rndis_netdev_set_hwcaps() */
stephen hemminger23312a32017-01-24 13:05:59 -08002013 net->features = net->hw_features |
2014 NETIF_F_HIGHDMA | NETIF_F_SG |
2015 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2016 net->vlan_features = net->features;
2017
stephen hemminger9749fed2017-07-19 11:53:16 -07002018 netdev_lockdep_set_classes(net);
2019
Jarod Wilsond0c2c992016-10-20 13:55:21 -04002020 /* MTU range: 68 - 1500 or 65521 */
2021 net->min_mtu = NETVSC_MTU_MIN;
2022 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2023 net->max_mtu = NETVSC_MTU - ETH_HLEN;
2024 else
2025 net->max_mtu = ETH_DATA_LEN;
2026
Haiyang Zhanga68f9612013-12-20 16:52:31 -08002027 ret = register_netdev(net);
2028 if (ret != 0) {
2029 pr_err("Unable to register netdev.\n");
stephen hemminger0c195562017-08-01 19:58:53 -07002030 goto register_failed;
Haiyang Zhanga68f9612013-12-20 16:52:31 -08002031 }
2032
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07002033 net_device_ctx->failover = failover_register(net, &netvsc_failover_ops);
Wei Yongjun9c6ffba2018-05-31 02:04:43 +00002034 if (IS_ERR(net_device_ctx->failover)) {
2035 ret = PTR_ERR(net_device_ctx->failover);
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07002036 goto err_failover;
Wei Yongjun9c6ffba2018-05-31 02:04:43 +00002037 }
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07002038
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002039 return ret;
stephen hemminger0c195562017-08-01 19:58:53 -07002040
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07002041err_failover:
2042 unregister_netdev(net);
stephen hemminger0c195562017-08-01 19:58:53 -07002043register_failed:
2044 rndis_filter_device_remove(dev, nvdev);
2045rndis_failed:
2046 free_percpu(net_device_ctx->vf_stats);
2047no_stats:
2048 hv_set_drvdata(dev, NULL);
2049 free_netdev(net);
2050no_net:
2051 return ret;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002052}
2053
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07002054static int netvsc_remove(struct hv_device *dev)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002055{
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002056 struct net_device_context *ndev_ctx;
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002057 struct net_device *vf_netdev, *net;
2058 struct netvsc_device *nvdev;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07002059
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002060 net = hv_get_drvdata(dev);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002061 if (net == NULL) {
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07002062 dev_err(&dev->device, "No net device to remove\n");
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002063 return 0;
2064 }
2065
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002066 ndev_ctx = netdev_priv(net);
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002067
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002068 cancel_delayed_work_sync(&ndev_ctx->dwork);
2069
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002070	rtnl_lock();	/* not RCU: cancel_work_sync() and teardown below may sleep */
 2071	nvdev = rtnl_dereference(ndev_ctx->nvdev);
2072
2073 if (nvdev)
2074 cancel_work_sync(&nvdev->subchan_work);
2075
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002076 /*
2077 * Call to the vsc driver to let it know that the device is being
stephen hemmingera0be4502017-03-22 14:51:01 -07002078 * removed. Also blocks mtu and channel changes.
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002079 */
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002081 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2082 if (vf_netdev)
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07002083 failover_slave_unregister(vf_netdev);
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002084
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002085 if (nvdev)
2086 rndis_filter_device_remove(dev, nvdev);
2087
Stephen Hemminger8195b132017-09-06 13:53:05 -07002088 unregister_netdevice(net);
2089
Sridhar Samudrala1ff78072018-05-24 09:55:14 -07002090 failover_unregister(ndev_ctx->failover);
2091
stephen hemmingera0be4502017-03-22 14:51:01 -07002092 rtnl_unlock();
stephen hemmingera0be4502017-03-22 14:51:01 -07002094
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002095 hv_set_drvdata(dev, NULL);
2096
stephen hemminger0c195562017-08-01 19:58:53 -07002097 free_percpu(ndev_ctx->vf_stats);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08002098 free_netdev(net);
Haiyang Zhangdf06bcf2011-05-23 09:03:47 -07002099 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002100}
2101
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002102static const struct hv_vmbus_device_id id_table[] = {
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07002103 /* Network guid */
K. Y. Srinivasan8f505942013-01-23 17:42:42 -08002104 { HV_NIC_GUID, },
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07002105 { },
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002106};
2107
2108MODULE_DEVICE_TABLE(vmbus, id_table);
2109
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07002110/* The one and only one */
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07002111static struct hv_driver netvsc_drv = {
Haiyang Zhangd31b20f2012-03-07 10:02:00 +00002112 .name = KBUILD_MODNAME,
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002113 .id_table = id_table,
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07002114 .probe = netvsc_probe,
2115 .remove = netvsc_remove,
K. Y. Srinivasand4890972011-05-10 07:55:17 -07002116};
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07002117
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07002118static void __exit netvsc_drv_exit(void)
Hank Janssenfceaf242009-07-13 15:34:54 -07002119{
Greg Kroah-Hartman768fa212011-08-25 15:07:32 -07002120 vmbus_driver_unregister(&netvsc_drv);
Hank Janssenfceaf242009-07-13 15:34:54 -07002121}
2122
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07002123static int __init netvsc_drv_init(void)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002124{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002125 int ret;
2126
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00002127 if (ring_size < RING_SIZE_MIN) {
2128 ring_size = RING_SIZE_MIN;
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002129 pr_info("Increased ring_size to %u (min allowed)\n",
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00002130 ring_size);
2131 }
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002132 netvsc_ring_bytes = ring_size * PAGE_SIZE;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002133
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002134 ret = vmbus_driver_register(&netvsc_drv);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002135 if (ret)
2136 return ret;
2137
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002138 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002139}
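/* ring_size is given in pages, so e.g. "modprobe hv_netvsc ring_size=256"
 * yields a 1 MiB VMBus ring per channel on systems with 4 KiB pages
 * (netvsc_ring_bytes = ring_size * PAGE_SIZE above).
 */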
2140
Hank Janssen26c14cc2010-02-11 23:02:42 +00002141MODULE_LICENSE("GPL");
Stephen Hemminger7880fc52010-05-04 09:58:52 -07002142MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
Hank Janssenfceaf242009-07-13 15:34:54 -07002143
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07002144module_init(netvsc_drv_init);
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07002145module_exit(netvsc_drv_exit);