/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

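/* Propagate promiscuous/allmulti flag changes from the synthetic
 * device to the slave VF device, if one is present.
 */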
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

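/* Sync the synthetic device's unicast/multicast address lists to the
 * VF (if any) and push the updated receive filter to the host.
 */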
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netif_tx_wake_all_queues(net);
	}

	if (vf_netdev) {
		/* Setting synthetic device up transparently sets
		 * slave as up. If open fails, then slave will still
		 * be offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

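/* Poll every channel until both its inbound and outbound rings have
 * drained, sleeping between attempts and giving up after RETRY_MAX
 * retries (just over 10 seconds).
 */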
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

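/* Append a per-packet info (PPI) record to the RNDIS packet header
 * and return a pointer to its payload for the caller to fill in.
 */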
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

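/* Map the packet's hash into the host-provided send indirection
 * table, caching the result in the socket to avoid rehashing.
 */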
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       accel_priv, fallback);
		else
			txq = fallback(vf_netdev, skb);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

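/* Describe a buffer as a series of pfn/offset/length page entries,
 * returning the number of hv_page_buffer slots used.
 */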
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused bytes at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

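/* Classify the packet's transport protocol so it can be checked
 * against the checksum offloads supported by the host.
 */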
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* if VF is present and up then redirect packets
	 * already called with rcu_read_lock_bh
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

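/* Allocate an skb for a received packet and copy the data into it;
 * the receive checksum and VLAN information supplied by the host are
 * applied here as well.
 */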
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

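/* Quiesce and tear down the synthetic device so that its settings
 * (channel count, MTU, buffer sizes) can be changed by a subsequent
 * netvsc_attach().
 */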
static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netif_tx_disable(ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		return ret;

	ret = netvsc_attach(net, &device_info);
	if (ret) {
		device_info.num_chn = orig;
		if (netvsc_attach(net, &device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

	return ret;
}

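/* Only speed and duplex are settable; verify that every other field
 * of the requested link settings is still at its default value.
 */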
static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

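/* Change the MTU by detaching and re-attaching the synthetic device.
 * The VF slave (if any) is updated first, and both devices are rolled
 * back to the original MTU on failure.
 */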
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			return ret;
	}

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, &device_info);
	if (ret)
		goto rollback;

	return 0;

rollback:
	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, &device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

	return ret;
}

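/* Sum the per-cpu VF statistics into a single total. */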
static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

Stephen Hemminger4323b472016-08-23 12:17:57 -07001232static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1233{
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001234 struct net_device_context *ndc = netdev_priv(dev);
stephen hemmingerfbd4c7e2017-06-07 15:53:47 -07001235 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001236
1237 if (!nvdev)
1238 return -ENODEV;
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001239
Stephen Hemminger4323b472016-08-23 12:17:57 -07001240 switch (string_set) {
1241 case ETH_SS_STATS:
stephen hemminger0c195562017-08-01 19:58:53 -07001242 return NETVSC_GLOBAL_STATS_LEN
1243 + NETVSC_VF_STATS_LEN
1244 + NETVSC_QUEUE_STATS_LEN(nvdev);
Stephen Hemminger4323b472016-08-23 12:17:57 -07001245 default:
1246 return -EINVAL;
1247 }
1248}
1249
1250static void netvsc_get_ethtool_stats(struct net_device *dev,
1251 struct ethtool_stats *stats, u64 *data)
1252{
1253 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001254 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
Stephen Hemminger4323b472016-08-23 12:17:57 -07001255 const void *nds = &ndc->eth_stats;
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001256 const struct netvsc_stats *qstats;
stephen hemminger0c195562017-08-01 19:58:53 -07001257 struct netvsc_vf_pcpu_stats sum;
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001258 unsigned int start;
1259 u64 packets, bytes;
1260 int i, j;
Stephen Hemminger4323b472016-08-23 12:17:57 -07001261
stephen hemminger545a8e72017-03-22 14:51:00 -07001262 if (!nvdev)
1263 return;
1264
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001265 for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
Stephen Hemminger4323b472016-08-23 12:17:57 -07001266 data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001267
stephen hemminger0c195562017-08-01 19:58:53 -07001268 netvsc_get_vf_stats(dev, &sum);
1269 for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
1270 data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
1271
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001272 for (j = 0; j < nvdev->num_chn; j++) {
1273 qstats = &nvdev->chan_table[j].tx_stats;
1274
1275 do {
1276 start = u64_stats_fetch_begin_irq(&qstats->syncp);
1277 packets = qstats->packets;
1278 bytes = qstats->bytes;
1279 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1280 data[i++] = packets;
1281 data[i++] = bytes;
1282
1283 qstats = &nvdev->chan_table[j].rx_stats;
1284 do {
1285 start = u64_stats_fetch_begin_irq(&qstats->syncp);
1286 packets = qstats->packets;
1287 bytes = qstats->bytes;
1288 } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1289 data[i++] = packets;
1290 data[i++] = bytes;
1291 }
Stephen Hemminger4323b472016-08-23 12:17:57 -07001292}
1293
1294static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1295{
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001296 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001297 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001298 u8 *p = data;
Stephen Hemminger4323b472016-08-23 12:17:57 -07001299 int i;
1300
stephen hemminger545a8e72017-03-22 14:51:00 -07001301 if (!nvdev)
1302 return;
1303
Stephen Hemminger4323b472016-08-23 12:17:57 -07001304 switch (stringset) {
1305 case ETH_SS_STATS:
stephen hemminger0c195562017-08-01 19:58:53 -07001306 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
1307 memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
1308 p += ETH_GSTRING_LEN;
1309 }
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001310
stephen hemminger0c195562017-08-01 19:58:53 -07001311 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
1312 memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
1313 p += ETH_GSTRING_LEN;
1314 }
1315
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001316 for (i = 0; i < nvdev->num_chn; i++) {
1317 sprintf(p, "tx_queue_%u_packets", i);
1318 p += ETH_GSTRING_LEN;
1319 sprintf(p, "tx_queue_%u_bytes", i);
1320 p += ETH_GSTRING_LEN;
1321 sprintf(p, "rx_queue_%u_packets", i);
1322 p += ETH_GSTRING_LEN;
1323 sprintf(p, "rx_queue_%u_bytes", i);
1324 p += ETH_GSTRING_LEN;
1325 }
1326
Stephen Hemminger4323b472016-08-23 12:17:57 -07001327 break;
1328 }
1329}
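/* Usage sketch: the string table above must stay in the same order as the
 * values written by netvsc_get_ethtool_stats() -- global stats, VF stats,
 * then four counters per queue. Assuming an interface named eth0 and
 * illustrative counter values:
 *
 *   $ ethtool -S eth0
 *        tx_scattered: 0
 *        ...
 *        vf_rx_packets: 0
 *        ...
 *        tx_queue_0_packets: 12345
 */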
1330
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001331static int
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001332netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1333 struct ethtool_rxnfc *info)
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001334{
Haiyang Zhang486e3982017-10-06 08:33:57 -07001335 const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1336
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001337 info->data = RXH_IP_SRC | RXH_IP_DST;
1338
1339 switch (info->flow_type) {
1340 case TCP_V4_FLOW:
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001341 if (ndc->l4_hash & HV_TCP4_L4HASH)
1342 info->data |= l4_flag;
1343
1344 break;
1345
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001346 case TCP_V6_FLOW:
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001347 if (ndc->l4_hash & HV_TCP6_L4HASH)
1348 info->data |= l4_flag;
1349
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001350 break;
1351
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001352 case UDP_V4_FLOW:
Haiyang Zhang486e3982017-10-06 08:33:57 -07001353 if (ndc->l4_hash & HV_UDP4_L4HASH)
1354 info->data |= l4_flag;
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001355
1356 break;
1357
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001358 case UDP_V6_FLOW:
Haiyang Zhang486e3982017-10-06 08:33:57 -07001359 if (ndc->l4_hash & HV_UDP6_L4HASH)
1360 info->data |= l4_flag;
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001361
1362 break;
1363
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001364 case IPV4_FLOW:
1365 case IPV6_FLOW:
1366 break;
1367 default:
1368 info->data = 0;
1369 break;
1370 }
1371
1372 return 0;
1373}
1374
1375static int
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001376netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1377 u32 *rules)
1378{
1379 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001380 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001381
1382 if (!nvdev)
1383 return -ENODEV;
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001384
1385 switch (info->cmd) {
1386 case ETHTOOL_GRXRINGS:
1387 info->data = nvdev->num_chn;
1388 return 0;
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001389
1390 case ETHTOOL_GRXFH:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001391 return netvsc_get_rss_hash_opts(ndc, info);
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001392 }
1393 return -EOPNOTSUPP;
1394}
1395
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001396static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1397 struct ethtool_rxnfc *info)
1398{
1399 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1400 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
Haiyang Zhang486e3982017-10-06 08:33:57 -07001401 switch (info->flow_type) {
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001402 case TCP_V4_FLOW:
1403 ndc->l4_hash |= HV_TCP4_L4HASH;
1404 break;
1405
1406 case TCP_V6_FLOW:
1407 ndc->l4_hash |= HV_TCP6_L4HASH;
1408 break;
1409
Haiyang Zhang486e3982017-10-06 08:33:57 -07001410 case UDP_V4_FLOW:
1411 ndc->l4_hash |= HV_UDP4_L4HASH;
1412 break;
1413
1414 case UDP_V6_FLOW:
1415 ndc->l4_hash |= HV_UDP6_L4HASH;
1416 break;
1417
1418 default:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001419 return -EOPNOTSUPP;
Haiyang Zhang486e3982017-10-06 08:33:57 -07001420 }
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001421
1422 return 0;
1423 }
1424
1425 if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
Haiyang Zhang486e3982017-10-06 08:33:57 -07001426 switch (info->flow_type) {
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001427 case TCP_V4_FLOW:
1428 ndc->l4_hash &= ~HV_TCP4_L4HASH;
1429 break;
1430
1431 case TCP_V6_FLOW:
1432 ndc->l4_hash &= ~HV_TCP6_L4HASH;
1433 break;
1434
Haiyang Zhang486e3982017-10-06 08:33:57 -07001435 case UDP_V4_FLOW:
1436 ndc->l4_hash &= ~HV_UDP4_L4HASH;
1437 break;
1438
1439 case UDP_V6_FLOW:
1440 ndc->l4_hash &= ~HV_UDP6_L4HASH;
1441 break;
1442
1443 default:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001444 return -EOPNOTSUPP;
Haiyang Zhang486e3982017-10-06 08:33:57 -07001445 }
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001446
1447 return 0;
1448 }
1449
1450 return -EOPNOTSUPP;
1451}
1452
1453static int
1454netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
1455{
1456 struct net_device_context *ndc = netdev_priv(ndev);
1457
1458 if (info->cmd == ETHTOOL_SRXFH)
1459 return netvsc_set_rss_hash_opts(ndc, info);
1460
1461 return -EOPNOTSUPP;
1462}
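/* Usage sketch: ETHTOOL_GRXFH/ETHTOOL_SRXFH correspond to ethtool's
 * rx-flow-hash option (eth0 is an assumed name). To fold L4 ports into the
 * hash for UDP over IPv4, or to query the current tuple:
 *
 *   $ ethtool -N eth0 rx-flow-hash udp4 sdfn
 *   $ ethtool -n eth0 rx-flow-hash udp4
 *
 * "sdfn" = src IP, dst IP, src port, dst port, i.e. the RXH_* flag
 * combinations accepted above; plain "sd" clears the L4 bits again.
 */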
1463
Richard Weinberger316158f2014-07-09 16:23:59 +02001464#ifdef CONFIG_NET_POLL_CONTROLLER
stephen hemmingera5ecd432017-06-07 15:53:48 -07001465static void netvsc_poll_controller(struct net_device *dev)
Richard Weinberger316158f2014-07-09 16:23:59 +02001466{
stephen hemmingera5ecd432017-06-07 15:53:48 -07001467 struct net_device_context *ndc = netdev_priv(dev);
1468 struct netvsc_device *ndev;
1469 int i;
1470
1471 rcu_read_lock();
1472 ndev = rcu_dereference(ndc->nvdev);
1473 if (ndev) {
1474 for (i = 0; i < ndev->num_chn; i++) {
1475 struct netvsc_channel *nvchan = &ndev->chan_table[i];
1476
1477 napi_schedule(&nvchan->napi);
1478 }
1479 }
1480 rcu_read_unlock();
Richard Weinberger316158f2014-07-09 16:23:59 +02001481}
1482#endif
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001483
stephen hemminger962f3fe2017-01-24 13:06:02 -08001484static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
1485{
1486 return NETVSC_HASH_KEYLEN;
1487}
1488
1489static u32 netvsc_rss_indir_size(struct net_device *dev)
1490{
stephen hemmingerff4a4412017-01-24 13:06:04 -08001491 return ITAB_NUM;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001492}
1493
1494static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1495 u8 *hfunc)
1496{
1497 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001498 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001499 struct rndis_device *rndis_dev;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001500 int i;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001501
stephen hemminger545a8e72017-03-22 14:51:00 -07001502 if (!ndev)
1503 return -ENODEV;
1504
stephen hemminger962f3fe2017-01-24 13:06:02 -08001505 if (hfunc)
1506 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
1507
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001508 rndis_dev = ndev->extension;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001509 if (indir) {
1510 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhang473713002017-10-13 12:28:03 -07001511 indir[i] = rndis_dev->rx_table[i];
stephen hemmingerff4a4412017-01-24 13:06:04 -08001512 }
1513
stephen hemminger962f3fe2017-01-24 13:06:02 -08001514 if (key)
1515 memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
1516
1517 return 0;
1518}
1519
1520static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1521 const u8 *key, const u8 hfunc)
1522{
1523 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001524 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001525 struct rndis_device *rndis_dev;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001526 int i;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001527
stephen hemminger545a8e72017-03-22 14:51:00 -07001528 if (!ndev)
1529 return -ENODEV;
1530
stephen hemminger962f3fe2017-01-24 13:06:02 -08001531 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1532 return -EOPNOTSUPP;
1533
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001534 rndis_dev = ndev->extension;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001535 if (indir) {
1536 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhangdb3cd7a2017-09-01 14:30:07 -07001537 if (indir[i] >= ndev->num_chn)
stephen hemmingerff4a4412017-01-24 13:06:04 -08001538 return -EINVAL;
1539
1540 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhang473713002017-10-13 12:28:03 -07001541 rndis_dev->rx_table[i] = indir[i];
stephen hemmingerff4a4412017-01-24 13:06:04 -08001542 }
1543
1544 if (!key) {
1545 if (!indir)
1546 return 0;
1547
1548 key = rndis_dev->rss_key;
1549 }
stephen hemminger962f3fe2017-01-24 13:06:02 -08001550
Haiyang Zhang715e2ec2017-09-01 14:30:04 -07001551 return rndis_filter_set_rss_param(rndis_dev, key);
stephen hemminger962f3fe2017-01-24 13:06:02 -08001552}
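/* Usage sketch: the Toeplitz key and indirection table exposed above map to
 * the generic ethtool RSS commands (eth0 and the queue count are assumed):
 *
 *   $ ethtool -x eth0            # dump indirection table and hash key
 *   $ ethtool -X eth0 equal 4    # spread flows evenly over queues 0-3
 *
 * Table entries referencing a queue >= num_chn are rejected with -EINVAL.
 */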
1553
stephen hemminger8b532792017-08-09 17:46:11 -07001554/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
1555 * It does have a pre-allocated receive area which is divided into sections.
1556 */
1557static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1558 struct ethtool_ringparam *ring)
1559{
1560 u32 max_buf_size;
1561
1562 ring->rx_pending = nvdev->recv_section_cnt;
1563 ring->tx_pending = nvdev->send_section_cnt;
1564
1565 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1566 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
1567 else
1568 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
1569
1570 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1571 ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
1572 / nvdev->send_section_size;
1573}
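/* Worked example (the exact constants are an assumption of this note; see
 * hyperv_net.h): with a 16 MB receive buffer and 1728-byte receive
 * sections, the reported maximum would be
 *
 *   rx_max_pending = (16 * 1024 * 1024) / 1728 = 9709 sections
 *
 * so "pending" here counts buffer sections, not DMA descriptors.
 */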
1574
1575static void netvsc_get_ringparam(struct net_device *ndev,
1576 struct ethtool_ringparam *ring)
1577{
1578 struct net_device_context *ndevctx = netdev_priv(ndev);
1579 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1580
1581 if (!nvdev)
1582 return;
1583
1584 __netvsc_get_ringparam(nvdev, ring);
1585}
1586
1587static int netvsc_set_ringparam(struct net_device *ndev,
1588 struct ethtool_ringparam *ring)
1589{
1590 struct net_device_context *ndevctx = netdev_priv(ndev);
1591 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
stephen hemminger8b532792017-08-09 17:46:11 -07001592 struct netvsc_device_info device_info;
1593 struct ethtool_ringparam orig;
1594 u32 new_tx, new_rx;
stephen hemminger8b532792017-08-09 17:46:11 -07001595 int ret = 0;
1596
1597 if (!nvdev || nvdev->destroy)
1598 return -ENODEV;
1599
1600 memset(&orig, 0, sizeof(orig));
1601 __netvsc_get_ringparam(nvdev, &orig);
1602
1603 new_tx = clamp_t(u32, ring->tx_pending,
1604 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
1605 new_rx = clamp_t(u32, ring->rx_pending,
1606 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
1607
1608 if (new_tx == orig.tx_pending &&
1609 new_rx == orig.rx_pending)
1610 return 0; /* no change */
1611
1612 memset(&device_info, 0, sizeof(device_info));
1613 device_info.num_chn = nvdev->num_chn;
stephen hemminger8b532792017-08-09 17:46:11 -07001614 device_info.send_sections = new_tx;
Alex Ng0ab09be2017-09-20 11:17:35 -07001615 device_info.send_section_size = nvdev->send_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001616 device_info.recv_sections = new_rx;
Alex Ng0ab09be2017-09-20 11:17:35 -07001617 device_info.recv_section_size = nvdev->recv_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001618
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001619 ret = netvsc_detach(ndev, nvdev);
1620 if (ret)
1621 return ret;
stephen hemminger8b532792017-08-09 17:46:11 -07001622
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001623 ret = netvsc_attach(ndev, &device_info);
1624 if (ret) {
stephen hemminger8b532792017-08-09 17:46:11 -07001625 device_info.send_sections = orig.tx_pending;
1626 device_info.recv_sections = orig.rx_pending;
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07001627
1628 if (netvsc_attach(ndev, &device_info))
1629			netdev_err(ndev, "restoring ringparam failed\n");
stephen hemminger8b532792017-08-09 17:46:11 -07001630 }
1631
stephen hemminger8b532792017-08-09 17:46:11 -07001632 return ret;
1633}
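/* Usage sketch: ring sizes are changed through the standard ethtool
 * interface (eth0 and the section counts are illustrative):
 *
 *   $ ethtool -g eth0                  # current and maximum counts
 *   $ ethtool -G eth0 rx 2048 tx 192
 *
 * Resizing reallocates the host buffers, so the code above detaches and
 * re-attaches the device, restoring the old sizes if re-attach fails.
 */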
1634
Haiyang Zhang273de022018-05-22 11:29:34 -07001635static u32 netvsc_get_msglevel(struct net_device *ndev)
1636{
1637 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1638
1639 return ndev_ctx->msg_enable;
1640}
1641
1642static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
1643{
1644 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1645
1646 ndev_ctx->msg_enable = val;
1647}
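/* Usage sketch: msg_enable is the usual NETIF_MSG_* bitmask and is reached
 * through ethtool (eth0 and the mask are illustrative; 0x7 would enable
 * NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK):
 *
 *   $ ethtool eth0 | grep -i "message level"
 *   $ ethtool -s eth0 msglvl 0x7
 */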
1648
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001649static const struct ethtool_ops ethtool_ops = {
1650 .get_drvinfo = netvsc_get_drvinfo,
Haiyang Zhang273de022018-05-22 11:29:34 -07001651 .get_msglevel = netvsc_get_msglevel,
1652 .set_msglevel = netvsc_set_msglevel,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001653 .get_link = ethtool_op_get_link,
Stephen Hemminger4323b472016-08-23 12:17:57 -07001654 .get_ethtool_stats = netvsc_get_ethtool_stats,
1655 .get_sset_count = netvsc_get_sset_count,
1656 .get_strings = netvsc_get_strings,
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -08001657 .get_channels = netvsc_get_channels,
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -07001658 .set_channels = netvsc_set_channels,
sixiao@microsoft.com76d13b52016-02-17 16:43:59 -08001659 .get_ts_info = ethtool_op_get_ts_info,
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001660 .get_rxnfc = netvsc_get_rxnfc,
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001661 .set_rxnfc = netvsc_set_rxnfc,
stephen hemminger962f3fe2017-01-24 13:06:02 -08001662 .get_rxfh_key_size = netvsc_get_rxfh_key_size,
1663 .get_rxfh_indir_size = netvsc_rss_indir_size,
1664 .get_rxfh = netvsc_get_rxfh,
1665 .set_rxfh = netvsc_set_rxfh,
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001666 .get_link_ksettings = netvsc_get_link_ksettings,
1667 .set_link_ksettings = netvsc_set_link_ksettings,
stephen hemminger8b532792017-08-09 17:46:11 -07001668 .get_ringparam = netvsc_get_ringparam,
1669 .set_ringparam = netvsc_set_ringparam,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001670};
1671
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001672static const struct net_device_ops device_ops = {
1673 .ndo_open = netvsc_open,
1674 .ndo_stop = netvsc_close,
1675 .ndo_start_xmit = netvsc_start_xmit,
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001676 .ndo_change_rx_flags = netvsc_change_rx_flags,
1677 .ndo_set_rx_mode = netvsc_set_rx_mode,
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001678 .ndo_change_mtu = netvsc_change_mtu,
Haiyang Zhangb681b582010-08-03 19:15:31 +00001679 .ndo_validate_addr = eth_validate_addr,
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001680 .ndo_set_mac_address = netvsc_set_mac_addr,
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001681 .ndo_select_queue = netvsc_select_queue,
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001682 .ndo_get_stats64 = netvsc_get_stats64,
Richard Weinberger316158f2014-07-09 16:23:59 +02001683#ifdef CONFIG_NET_POLL_CONTROLLER
1684 .ndo_poll_controller = netvsc_poll_controller,
1685#endif
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001686};
1687
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001688/*
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001689 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE, emulate a link
1690 * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT with the carrier already
1691 * present, send a GARP packet to network peers with netdev_notify_peers().
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001692 */
Haiyang Zhang891de742014-02-12 16:54:27 -08001693static void netvsc_link_change(struct work_struct *w)
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001694{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001695 struct net_device_context *ndev_ctx =
1696 container_of(w, struct net_device_context, dwork.work);
1697 struct hv_device *device_obj = ndev_ctx->device_ctx;
1698 struct net_device *net = hv_get_drvdata(device_obj);
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001699 struct netvsc_device *net_device;
Haiyang Zhang891de742014-02-12 16:54:27 -08001700 struct rndis_device *rdev;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001701 struct netvsc_reconfig *event = NULL;
1702 bool notify = false, reschedule = false;
1703 unsigned long flags, next_reconfig, delay;
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001704
stephen hemminger9b4e9462017-08-24 16:49:16 -07001705	/* if changes are happening, come back later */
1706 if (!rtnl_trylock()) {
1707 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1708 return;
1709 }
1710
stephen hemmingera0be4502017-03-22 14:51:01 -07001711 net_device = rtnl_dereference(ndev_ctx->nvdev);
1712 if (!net_device)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001713 goto out_unlock;
1714
Haiyang Zhang891de742014-02-12 16:54:27 -08001715 rdev = net_device->extension;
Haiyang Zhang891de742014-02-12 16:54:27 -08001716
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001717 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1718 if (time_is_after_jiffies(next_reconfig)) {
1719		/* link_watch only sends one notification with current state
1720		 * per second; avoid doing reconfig more frequently. Handle
1721		 * wrap-around.
1722 */
1723 delay = next_reconfig - jiffies;
1724 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1725 schedule_delayed_work(&ndev_ctx->dwork, delay);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001726 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001727 }
1728 ndev_ctx->last_reconfig = jiffies;
1729
1730 spin_lock_irqsave(&ndev_ctx->lock, flags);
1731 if (!list_empty(&ndev_ctx->reconfig_events)) {
1732 event = list_first_entry(&ndev_ctx->reconfig_events,
1733 struct netvsc_reconfig, list);
1734 list_del(&event->list);
1735 reschedule = !list_empty(&ndev_ctx->reconfig_events);
1736 }
1737 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1738
1739 if (!event)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001740 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001741
1742 switch (event->event) {
1743 /* Only the following events are possible due to the check in
1744 * netvsc_linkstatus_callback()
1745 */
1746 case RNDIS_STATUS_MEDIA_CONNECT:
1747 if (rdev->link_state) {
1748 rdev->link_state = false;
stephen hemminger0c195562017-08-01 19:58:53 -07001749 netif_carrier_on(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001750 netif_tx_wake_all_queues(net);
1751 } else {
1752 notify = true;
Haiyang Zhang3a494e72014-06-19 18:34:36 -07001753 }
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001754 kfree(event);
1755 break;
1756 case RNDIS_STATUS_MEDIA_DISCONNECT:
1757 if (!rdev->link_state) {
1758 rdev->link_state = true;
1759 netif_carrier_off(net);
1760 netif_tx_stop_all_queues(net);
1761 }
1762 kfree(event);
1763 break;
1764 case RNDIS_STATUS_NETWORK_CHANGE:
1765 /* Only makes sense if carrier is present */
1766 if (!rdev->link_state) {
1767 rdev->link_state = true;
1768 netif_carrier_off(net);
1769 netif_tx_stop_all_queues(net);
1770 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1771 spin_lock_irqsave(&ndev_ctx->lock, flags);
Haiyang Zhang15cfd402016-04-21 16:13:01 -07001772 list_add(&event->list, &ndev_ctx->reconfig_events);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001773 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1774 reschedule = true;
1775 }
1776 break;
Haiyang Zhang891de742014-02-12 16:54:27 -08001777 }
1778
1779 rtnl_unlock();
1780
1781 if (notify)
1782 netdev_notify_peers(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001783
1784	/* link_watch only sends one notification with current state per
1785	 * second; handle the next reconfig event in 2 seconds.
1786 */
1787 if (reschedule)
1788 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001789
1790 return;
1791
1792out_unlock:
1793 rtnl_unlock();
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001794}
1795
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001796static struct net_device *get_netvsc_bymac(const u8 *mac)
1797{
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07001798 struct net_device_context *ndev_ctx;
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001799
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07001800 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
1801 struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001802
1803 if (ether_addr_equal(mac, dev->perm_addr))
1804 return dev;
1805 }
1806
1807 return NULL;
1808}
1809
1810static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1811{
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07001812 struct net_device_context *net_device_ctx;
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001813 struct net_device *dev;
1814
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07001815 dev = netdev_master_upper_dev_get(vf_netdev);
1816 if (!dev || dev->netdev_ops != &device_ops)
1817 return NULL; /* not a netvsc device */
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001818
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07001819 net_device_ctx = netdev_priv(dev);
1820 if (!rtnl_dereference(net_device_ctx->nvdev))
1821 return NULL; /* device is removed */
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001822
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07001823 return dev;
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001824}
1825
stephen hemminger0c195562017-08-01 19:58:53 -07001826/* Called when the VF is injecting data into the network stack.
1827 * Change the associated network device from the VF to netvsc.
1828 * Note: already called with rcu_read_lock held.
1829 */
1830static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
1831{
1832 struct sk_buff *skb = *pskb;
1833 struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
1834 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1835 struct netvsc_vf_pcpu_stats *pcpu_stats
1836 = this_cpu_ptr(ndev_ctx->vf_stats);
1837
1838 skb->dev = ndev;
1839
1840 u64_stats_update_begin(&pcpu_stats->syncp);
1841 pcpu_stats->rx_packets++;
1842 pcpu_stats->rx_bytes += skb->len;
1843 u64_stats_update_end(&pcpu_stats->syncp);
1844
1845 return RX_HANDLER_ANOTHER;
1846}
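/* Returning RX_HANDLER_ANOTHER above makes the core run another round of
 * __netif_receive_skb() with the rewritten skb->dev, so packets arriving on
 * the VF are delivered (and accounted) as if received on the synthetic
 * netvsc interface -- the "transparent VF" model.
 */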
1847
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001848static int netvsc_vf_join(struct net_device *vf_netdev,
1849 struct net_device *ndev)
1850{
1851 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1852 int ret;
1853
1854 ret = netdev_rx_handler_register(vf_netdev,
1855 netvsc_vf_handle_frame, ndev);
1856 if (ret != 0) {
1857 netdev_err(vf_netdev,
1858			   "cannot register netvsc VF receive handler (err = %d)\n",
1859 ret);
1860 goto rx_handler_failed;
1861 }
1862
1863 ret = netdev_master_upper_dev_link(vf_netdev, ndev,
1864 NULL, NULL, NULL);
1865 if (ret != 0) {
1866 netdev_err(vf_netdev,
1867			   "cannot set master device %s (err = %d)\n",
1868 ndev->name, ret);
1869 goto upper_link_failed;
1870 }
1871
1872 /* set slave flag before open to prevent IPv6 addrconf */
1873 vf_netdev->flags |= IFF_SLAVE;
1874
1875 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
1876
1877 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
1878
1879 netdev_info(vf_netdev, "joined to %s\n", ndev->name);
1880 return 0;
1881
1882upper_link_failed:
1883 netdev_rx_handler_unregister(vf_netdev);
1884rx_handler_failed:
1885 return ret;
1886}
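/* Observation sketch: after a successful join the VF appears as a slave of
 * the synthetic device (interface names and index are illustrative):
 *
 *   $ ip link show
 *   3: enP1s1: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> ... master eth0 ...
 */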
1887
stephen hemminger0c195562017-08-01 19:58:53 -07001888static void __netvsc_vf_setup(struct net_device *ndev,
1889 struct net_device *vf_netdev)
1890{
1891 int ret;
1892
stephen hemminger0c195562017-08-01 19:58:53 -07001893 /* Align MTU of VF with master */
1894 ret = dev_set_mtu(vf_netdev, ndev->mtu);
1895 if (ret)
1896 netdev_warn(vf_netdev,
1897 "unable to change mtu to %u\n", ndev->mtu);
1898
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001899	/* set multicast and other flags on the VF */
1900 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
Stephen Hemmingerb0dee792018-03-07 13:49:12 -08001901
1902 /* sync address list from ndev to VF */
1903 netif_addr_lock_bh(ndev);
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001904 dev_uc_sync(vf_netdev, ndev);
1905 dev_mc_sync(vf_netdev, ndev);
Stephen Hemmingerb0dee792018-03-07 13:49:12 -08001906 netif_addr_unlock_bh(ndev);
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001907
stephen hemminger0c195562017-08-01 19:58:53 -07001908 if (netif_running(ndev)) {
1909 ret = dev_open(vf_netdev);
1910 if (ret)
1911 netdev_warn(vf_netdev,
1912 "unable to open: %d\n", ret);
1913 }
1914}
1915
1916/* Set up the VF as a slave of the synthetic device.
1917 * Runs in a workqueue to avoid recursion in netlink callbacks.
1918 */
1919static void netvsc_vf_setup(struct work_struct *w)
1920{
1921 struct net_device_context *ndev_ctx
stephen hemminger6123c662017-08-09 17:46:03 -07001922 = container_of(w, struct net_device_context, vf_takeover.work);
stephen hemminger0c195562017-08-01 19:58:53 -07001923 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
1924 struct net_device *vf_netdev;
1925
stephen hemmingerfb84af82017-08-04 12:14:00 -07001926 if (!rtnl_trylock()) {
stephen hemminger6123c662017-08-09 17:46:03 -07001927 schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
stephen hemmingerfb84af82017-08-04 12:14:00 -07001928 return;
1929 }
1930
stephen hemminger0c195562017-08-01 19:58:53 -07001931 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
1932 if (vf_netdev)
1933 __netvsc_vf_setup(ndev, vf_netdev);
1934
1935 rtnl_unlock();
1936}
1937
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001938static int netvsc_register_vf(struct net_device *vf_netdev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001939{
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001940 struct net_device *ndev;
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001941 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001942 struct netvsc_device *netvsc_dev;
Stephen Hemmingerc0a41b82018-06-11 12:44:56 -07001943 int ret;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001944
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001945 if (vf_netdev->addr_len != ETH_ALEN)
1946 return NOTIFY_DONE;
1947
1948 /*
1949 * We will use the MAC address to locate the synthetic interface to
1950 * associate with the VF interface. If we don't find a matching
1951 * synthetic interface, move on.
1952 */
1953 ndev = get_netvsc_bymac(vf_netdev->perm_addr);
1954 if (!ndev)
1955 return NOTIFY_DONE;
1956
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001957 net_device_ctx = netdev_priv(ndev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001958 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001959 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001960 return NOTIFY_DONE;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001961
Stephen Hemmingerc0a41b82018-06-11 12:44:56 -07001962	/* if the synthetic interface is in a different namespace,
1963 * then move the VF to that namespace; join will be
1964 * done again in that context.
1965 */
1966 if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
1967 ret = dev_change_net_namespace(vf_netdev,
1968 dev_net(ndev), "eth%d");
1969 if (ret)
1970 netdev_err(vf_netdev,
1971 "could not move to same namespace as %s: %d\n",
1972 ndev->name, ret);
1973 else
1974 netdev_info(vf_netdev,
1975 "VF moved to namespace with: %s\n",
1976 ndev->name);
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001977 return NOTIFY_DONE;
Stephen Hemmingerc0a41b82018-06-11 12:44:56 -07001978 }
stephen hemminger0c195562017-08-01 19:58:53 -07001979
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001980 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
stephen hemminger0c195562017-08-01 19:58:53 -07001981
Stephen Hemmingerc0a41b82018-06-11 12:44:56 -07001982 if (netvsc_vf_join(vf_netdev, ndev) != 0)
1983 return NOTIFY_DONE;
1984
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001985 dev_hold(vf_netdev);
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001986 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
1987 return NOTIFY_OK;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001988}
1989
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001990/* VF up/down change detected; switch the data path */
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001991static int netvsc_vf_changed(struct net_device *vf_netdev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001992{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001993 struct net_device_context *net_device_ctx;
stephen hemminger7b83f522017-08-07 11:30:00 -07001994 struct netvsc_device *netvsc_dev;
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001995 struct net_device *ndev;
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001996 bool vf_is_up = netif_running(vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001997
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07001998 ndev = get_netvsc_byref(vf_netdev);
1999 if (!ndev)
2000 return NOTIFY_DONE;
2001
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02002002 net_device_ctx = netdev_priv(ndev);
stephen hemminger7b83f522017-08-07 11:30:00 -07002003 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2004 if (!netvsc_dev)
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002005 return NOTIFY_DONE;
stephen hemminger7b83f522017-08-07 11:30:00 -07002006
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07002007 netvsc_switch_datapath(ndev, vf_is_up);
2008 netdev_info(ndev, "Data path switched %s VF: %s\n",
2009 vf_is_up ? "to" : "from", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002010
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002011 return NOTIFY_OK;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002012}
2013
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002014static int netvsc_unregister_vf(struct net_device *vf_netdev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002015{
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002016 struct net_device *ndev;
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02002017 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002018
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002019 ndev = get_netvsc_byref(vf_netdev);
2020 if (!ndev)
2021 return NOTIFY_DONE;
2022
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02002023 net_device_ctx = netdev_priv(ndev);
stephen hemminger6123c662017-08-09 17:46:03 -07002024 cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07002025
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02002026 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07002027
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002028 netdev_rx_handler_unregister(vf_netdev);
2029 netdev_upper_dev_unlink(vf_netdev, ndev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07002030 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
Stephen Hemminger07d0f002016-09-22 16:56:30 -07002031 dev_put(vf_netdev);
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002032
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002033 return NOTIFY_OK;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002034}
2035
K. Y. Srinivasan84946892011-09-13 10:59:38 -07002036static int netvsc_probe(struct hv_device *dev,
2037 const struct hv_vmbus_device_id *dev_id)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002038{
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002039 struct net_device *net = NULL;
2040 struct net_device_context *net_device_ctx;
2041 struct netvsc_device_info device_info;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07002042 struct netvsc_device *nvdev;
stephen hemminger0c195562017-08-01 19:58:53 -07002043 int ret = -ENOMEM;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002044
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07002045 net = alloc_etherdev_mq(sizeof(struct net_device_context),
stephen hemminger2b018882017-01-24 13:06:03 -08002046 VRSS_CHANNEL_MAX);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002047 if (!net)
stephen hemminger0c195562017-08-01 19:58:53 -07002048 goto no_net;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002049
Haiyang Zhang1b07da52014-03-04 14:11:06 -08002050 netif_carrier_off(net);
2051
Haiyang Zhangb37879e2016-08-04 10:42:14 -07002052 netvsc_init_settings(net);
2053
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002054 net_device_ctx = netdev_priv(net);
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07002055 net_device_ctx->device_ctx = dev;
Simon Xiao3f300ff2015-04-28 01:05:17 -07002056 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
2057 if (netif_msg_probe(net_device_ctx))
2058 netdev_dbg(net, "netvsc msg_enable: %d\n",
2059 net_device_ctx->msg_enable);
2060
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07002061 hv_set_drvdata(dev, net);
Vitaly Kuznetsovf580aec2016-05-13 13:55:20 +02002062
Haiyang Zhang891de742014-02-12 16:54:27 -08002063 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002064
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01002065 spin_lock_init(&net_device_ctx->lock);
2066 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
stephen hemminger6123c662017-08-09 17:46:03 -07002067 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
stephen hemminger0c195562017-08-01 19:58:53 -07002068
2069 net_device_ctx->vf_stats
2070 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
2071 if (!net_device_ctx->vf_stats)
2072 goto no_stats;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01002073
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002074 net->netdev_ops = &device_ops;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00002075 net->ethtool_ops = &ethtool_ops;
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07002076 SET_NETDEV_DEV(net, &dev->device);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002077
Vitaly Kuznetsov14a03cf2016-02-05 17:29:08 +01002078 /* We always need headroom for rndis header */
2079 net->needed_headroom = RNDIS_AND_PPI_SIZE;
2080
Haiyang Zhang6450f8f2017-09-22 15:31:38 -07002081	/* Initialize the number of queues to 1; we may change it if more
2082 * channels are offered later.
2083 */
2084 netif_set_real_num_tx_queues(net, 1);
2085 netif_set_real_num_rx_queues(net, 1);
2086
Haiyang Zhang692e0842011-09-01 12:19:43 -07002087 /* Notify the netvsc driver of the new device */
Andrew Schwartzmeyer8ebdcc52015-08-11 17:14:31 -07002088 memset(&device_info, 0, sizeof(device_info));
stephen hemminger3071ada2017-03-22 14:50:59 -07002089 device_info.num_chn = VRSS_CHANNEL_DEFAULT;
stephen hemminger8b532792017-08-09 17:46:11 -07002090 device_info.send_sections = NETVSC_DEFAULT_TX;
Alex Ng0ab09be2017-09-20 11:17:35 -07002091 device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
stephen hemminger8b532792017-08-09 17:46:11 -07002092 device_info.recv_sections = NETVSC_DEFAULT_RX;
Alex Ng0ab09be2017-09-20 11:17:35 -07002093 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
stephen hemminger9749fed2017-07-19 11:53:16 -07002094
2095 nvdev = rndis_filter_device_add(dev, &device_info);
2096 if (IS_ERR(nvdev)) {
2097 ret = PTR_ERR(nvdev);
Haiyang Zhang692e0842011-09-01 12:19:43 -07002098 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
stephen hemminger0c195562017-08-01 19:58:53 -07002099 goto rndis_failed;
Haiyang Zhang692e0842011-09-01 12:19:43 -07002100 }
stephen hemminger0c195562017-08-01 19:58:53 -07002101
Haiyang Zhang692e0842011-09-01 12:19:43 -07002102 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2103
Stephen Hemminger3ffe64f2018-06-29 14:07:16 -07002104 if (nvdev->num_chn > 1)
2105 schedule_work(&nvdev->subchan_work);
2106
Vitaly Kuznetsovaefd80e2017-11-15 15:12:55 +01002107 /* hw_features computed in rndis_netdev_set_hwcaps() */
stephen hemminger23312a32017-01-24 13:05:59 -08002108 net->features = net->hw_features |
2109 NETIF_F_HIGHDMA | NETIF_F_SG |
2110 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2111 net->vlan_features = net->features;
2112
stephen hemminger9749fed2017-07-19 11:53:16 -07002113 netdev_lockdep_set_classes(net);
2114
Jarod Wilsond0c2c992016-10-20 13:55:21 -04002115 /* MTU range: 68 - 1500 or 65521 */
2116 net->min_mtu = NETVSC_MTU_MIN;
2117 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2118 net->max_mtu = NETVSC_MTU - ETH_HLEN;
2119 else
2120 net->max_mtu = ETH_DATA_LEN;
2121
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07002122 rtnl_lock();
2123 ret = register_netdevice(net);
Haiyang Zhanga68f9612013-12-20 16:52:31 -08002124 if (ret != 0) {
2125 pr_err("Unable to register netdev.\n");
stephen hemminger0c195562017-08-01 19:58:53 -07002126 goto register_failed;
Haiyang Zhanga68f9612013-12-20 16:52:31 -08002127 }
2128
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07002129 list_add(&net_device_ctx->list, &netvsc_dev_list);
2130 rtnl_unlock();
2131 return 0;
stephen hemminger0c195562017-08-01 19:58:53 -07002132
2133register_failed:
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07002134 rtnl_unlock();
stephen hemminger0c195562017-08-01 19:58:53 -07002135 rndis_filter_device_remove(dev, nvdev);
2136rndis_failed:
2137 free_percpu(net_device_ctx->vf_stats);
2138no_stats:
2139 hv_set_drvdata(dev, NULL);
2140 free_netdev(net);
2141no_net:
2142 return ret;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002143}
2144
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07002145static int netvsc_remove(struct hv_device *dev)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002146{
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002147 struct net_device_context *ndev_ctx;
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002148 struct net_device *vf_netdev, *net;
2149 struct netvsc_device *nvdev;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07002150
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002151 net = hv_get_drvdata(dev);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002152 if (net == NULL) {
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07002153 dev_err(&dev->device, "No net device to remove\n");
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002154 return 0;
2155 }
2156
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002157 ndev_ctx = netdev_priv(net);
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002158
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002159 cancel_delayed_work_sync(&ndev_ctx->dwork);
2160
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002161 rcu_read_lock();
2162 nvdev = rcu_dereference(ndev_ctx->nvdev);
2163
2164 if (nvdev)
2165 cancel_work_sync(&nvdev->subchan_work);
2166
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002167 /*
2168	 * Call the VSC driver to let it know that the device is being
stephen hemmingera0be4502017-03-22 14:51:01 -07002169	 * removed. Also blocks MTU and channel changes.
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002170 */
stephen hemmingera0be4502017-03-22 14:51:01 -07002171 rtnl_lock();
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002172 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2173 if (vf_netdev)
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002174 netvsc_unregister_vf(vf_netdev);
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002175
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002176 if (nvdev)
2177 rndis_filter_device_remove(dev, nvdev);
2178
Stephen Hemminger8195b132017-09-06 13:53:05 -07002179 unregister_netdevice(net);
Stephen Hemminger7bf7bb32018-06-11 12:44:55 -07002180 list_del(&ndev_ctx->list);
Stephen Hemminger8195b132017-09-06 13:53:05 -07002181
stephen hemmingera0be4502017-03-22 14:51:01 -07002182 rtnl_unlock();
Stephen Hemminger7b2ee502018-03-20 15:03:05 -07002183 rcu_read_unlock();
stephen hemmingera0be4502017-03-22 14:51:01 -07002184
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002185 hv_set_drvdata(dev, NULL);
2186
stephen hemminger0c195562017-08-01 19:58:53 -07002187 free_percpu(ndev_ctx->vf_stats);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08002188 free_netdev(net);
Haiyang Zhangdf06bcf2011-05-23 09:03:47 -07002189 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002190}
2191
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002192static const struct hv_vmbus_device_id id_table[] = {
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07002193 /* Network guid */
K. Y. Srinivasan8f505942013-01-23 17:42:42 -08002194 { HV_NIC_GUID, },
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07002195 { },
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002196};
2197
2198MODULE_DEVICE_TABLE(vmbus, id_table);
2199
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07002200/* The one and only one */
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07002201static struct hv_driver netvsc_drv = {
Haiyang Zhangd31b20f2012-03-07 10:02:00 +00002202 .name = KBUILD_MODNAME,
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002203 .id_table = id_table,
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07002204 .probe = netvsc_probe,
2205 .remove = netvsc_remove,
K. Y. Srinivasand4890972011-05-10 07:55:17 -07002206};
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07002207
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002208/*
2209 * On Hyper-V, every VF interface is matched with a corresponding
2210 * synthetic interface. The synthetic interface is presented first
2211 * to the guest. When the corresponding VF instance is registered,
2212 * we will take care of switching the data path.
2213 */
2214static int netvsc_netdev_event(struct notifier_block *this,
2215 unsigned long event, void *ptr)
2216{
2217 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2218
2219 /* Skip our own events */
2220 if (event_dev->netdev_ops == &device_ops)
2221 return NOTIFY_DONE;
2222
2223 /* Avoid non-Ethernet type devices */
2224 if (event_dev->type != ARPHRD_ETHER)
2225 return NOTIFY_DONE;
2226
2227	/* Avoid a VLAN dev with the same MAC registering as a VF */
2228 if (is_vlan_dev(event_dev))
2229 return NOTIFY_DONE;
2230
2231	/* Avoid a bonding master dev with the same MAC registering as a VF */
2232 if ((event_dev->priv_flags & IFF_BONDING) &&
2233 (event_dev->flags & IFF_MASTER))
2234 return NOTIFY_DONE;
2235
2236 switch (event) {
2237 case NETDEV_REGISTER:
2238 return netvsc_register_vf(event_dev);
2239 case NETDEV_UNREGISTER:
2240 return netvsc_unregister_vf(event_dev);
2241 case NETDEV_UP:
2242 case NETDEV_DOWN:
2243 return netvsc_vf_changed(event_dev);
2244 default:
2245 return NOTIFY_DONE;
2246 }
2247}
2248
2249static struct notifier_block netvsc_netdev_notifier = {
2250 .notifier_call = netvsc_netdev_event,
2251};
2252
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07002253static void __exit netvsc_drv_exit(void)
Hank Janssenfceaf242009-07-13 15:34:54 -07002254{
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002255 unregister_netdevice_notifier(&netvsc_netdev_notifier);
Greg Kroah-Hartman768fa212011-08-25 15:07:32 -07002256 vmbus_driver_unregister(&netvsc_drv);
Hank Janssenfceaf242009-07-13 15:34:54 -07002257}
2258
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07002259static int __init netvsc_drv_init(void)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002260{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002261 int ret;
2262
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00002263 if (ring_size < RING_SIZE_MIN) {
2264 ring_size = RING_SIZE_MIN;
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002265 pr_info("Increased ring_size to %u (min allowed)\n",
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00002266 ring_size);
2267 }
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002268 netvsc_ring_bytes = ring_size * PAGE_SIZE;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002269
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002270 ret = vmbus_driver_register(&netvsc_drv);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002271 if (ret)
2272 return ret;
2273
Stephen Hemminger8cde8f02018-06-11 12:44:54 -07002274 register_netdevice_notifier(&netvsc_netdev_notifier);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002275 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002276}
2277
Hank Janssen26c14cc2010-02-11 23:02:42 +00002278MODULE_LICENSE("GPL");
Stephen Hemminger7880fc52010-05-04 09:58:52 -07002279MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
Hank Janssenfceaf242009-07-13 15:34:54 -07002280
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07002281module_init(netvsc_drv_init);
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07002282module_exit(netvsc_drv_exit);