/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN          64
#define NETVSC_MIN_TX_SECTIONS 10
#define NETVSC_DEFAULT_TX      192     /* ~1M */
#define NETVSC_MIN_RX_SECTIONS 10      /* ~64K */
#define NETVSC_DEFAULT_RX      10485   /* Max ~16M */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

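/* Size of each VMBus ring buffer, in number of pages. Settable only at
 * module load time, e.g. (assuming the module is built as hv_netvsc):
 *   modprobe hv_netvsc ring_size=256
 */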
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                               NETIF_MSG_LINK | NETIF_MSG_IFUP |
                               NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
                               NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

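/* ndo_set_rx_mode handler: push the updated receive filter state down
 * to the host through the RNDIS filter layer.
 */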
static void netvsc_set_multicast_list(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

        rndis_filter_update(nvdev);
}

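/* ndo_open handler: open the RNDIS filter, wake the transmit queues,
 * set the carrier from the current RNDIS link state, and bring up the
 * slave VF device if one is bound.
 */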
static int netvsc_open(struct net_device *net)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
        struct rndis_device *rdev;
        int ret = 0;

        netif_carrier_off(net);

        /* Open up the device */
        ret = rndis_filter_open(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to open device (ret %d).\n", ret);
                return ret;
        }

        netif_tx_wake_all_queues(net);

        rdev = nvdev->extension;

        if (!rdev->link_state)
                netif_carrier_on(net);

        if (vf_netdev) {
                /* Setting the synthetic device up transparently sets
                 * the slave as up. If open fails, then the slave will
                 * still be offline (and not used).
                 */
                ret = dev_open(vf_netdev);
                if (ret)
                        netdev_warn(net,
                                    "unable to open slave: %s: %d\n",
                                    vf_netdev->name, ret);
        }
        return 0;
}

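/* ndo_stop handler: close the RNDIS filter, then poll with exponential
 * backoff (10 ms up to 1 s, at most retry_max tries) until the host has
 * drained both VMBus ring buffers on every channel; report -ETIMEDOUT
 * if data is still pending after the last retry.
 */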
static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct net_device *vf_netdev
                = rtnl_dereference(net_device_ctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret = 0;
        u32 aread, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;

        netif_tx_disable(net);

        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
                goto out;

        ret = rndis_filter_close(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
                return ret;
        }

        /* Ensure pending bytes in ring are read */
        while (true) {
                aread = 0;
                for (i = 0; i < nvdev->num_chn; i++) {
                        chn = nvdev->chan_table[i].channel;
                        if (!chn)
                                continue;

                        aread = hv_get_bytes_to_read(&chn->inbound);
                        if (aread)
                                break;

                        aread = hv_get_bytes_to_read(&chn->outbound);
                        if (aread)
                                break;
                }

                retry++;
                if (retry > retry_max || aread == 0)
                        break;

                msleep(msec);

                if (msec < 1000)
                        msec *= 2;
        }

        if (aread) {
                netdev_err(net, "Ring buffer not empty after closing rndis\n");
                ret = -ETIMEDOUT;
        }

out:
        if (vf_netdev)
                dev_close(vf_netdev);

        return ret;
}

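/* Append a per-packet info (PPI) record of the given type to the RNDIS
 * message and return a pointer to it so the caller can fill in the
 * payload.
 */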
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
                           int pkt_type)
{
        struct rndis_packet *rndis_pkt;
        struct rndis_per_packet_info *ppi;

        rndis_pkt = &msg->msg.pkt;
        rndis_pkt->data_offset += ppi_size;

        ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
                rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

        ppi->size = ppi_size;
        ppi->type = pkt_type;
        ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

        rndis_pkt->per_pkt_info_len += ppi_size;

        return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
        struct sk_buff *skb,
        const struct net_device_context *ndc)
{
        struct flow_keys flow;
        u32 hash;
        static u32 hashrnd __read_mostly;

        net_get_random_once(&hashrnd, sizeof(hashrnd));

        if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
                return 0;

        if (flow.basic.ip_proto == IPPROTO_TCP ||
            (flow.basic.ip_proto == IPPROTO_UDP &&
             ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
              (flow.basic.n_proto == htons(ETH_P_IPV6) &&
               ndc->udp6_l4_hash)))) {
                return skb_get_hash(skb);
        } else {
                if (flow.basic.n_proto == htons(ETH_P_IP))
                        hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
                else if (flow.basic.n_proto == htons(ETH_P_IPV6))
                        hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
                else
                        hash = 0;

                skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
        }

        return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
                                      struct sk_buff *skb, int old_idx)
{
        const struct net_device_context *ndc = netdev_priv(ndev);
        struct sock *sk = skb->sk;
        int q_idx;

        q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) &
                                   (VRSS_SEND_TAB_SIZE - 1)];

        /* If queue index changed record the new value */
        if (q_idx != old_idx &&
            sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
                sk_tx_queue_set(sk, q_idx);

        return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
        int q_idx = sk_tx_queue_get(skb->sk);

        if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
                /* If forwarding a packet, we use the recorded queue when
                 * available for better cache locality.
                 */
                if (skb_rx_queue_recorded(skb))
                        q_idx = skb_get_rx_queue(skb);
                else
                        q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
        }

        return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        struct net_device_context *ndc = netdev_priv(ndev);
        struct net_device *vf_netdev;
        u16 txq;

        rcu_read_lock();
        vf_netdev = rcu_dereference(ndc->vf_netdev);
        if (vf_netdev) {
                txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
                qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
        } else {
                txq = netvsc_pick_tx(ndev, skb);
        }
        rcu_read_unlock();

        while (unlikely(txq >= ndev->real_num_tx_queues))
                txq -= ndev->real_num_tx_queues;

        return txq;
}

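/* Describe the region [offset, offset + len) of a (possibly compound)
 * page as a series of page buffer entries; returns the number of
 * entries filled in.
 */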
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
                       struct hv_page_buffer *pb)
{
        int j = 0;

        /* Deal with compound pages by ignoring the unused part
         * of the page.
         */
        page += (offset >> PAGE_SHIFT);
        offset &= ~PAGE_MASK;

        while (len > 0) {
                unsigned long bytes;

                bytes = PAGE_SIZE - offset;
                if (bytes > len)
                        bytes = len;
                pb[j].pfn = page_to_pfn(page);
                pb[j].offset = offset;
                pb[j].len = bytes;

                offset += bytes;
                len -= bytes;

                if (offset == PAGE_SIZE && len) {
                        page++;
                        offset = 0;
                        j++;
                }
        }

        return j + 1;
}

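/* Build the page buffer array for one transmit packet: the RNDIS
 * header first, then the skb linear data, then each skb fragment.
 * Returns the number of slots used.
 */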
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                           struct hv_netvsc_packet *packet,
                           struct hv_page_buffer *pb)
{
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
         * 1. hdr: RNDIS header and PPI
         * 2. skb linear data
         * 3. skb fragment data
         */
        slots_used += fill_pg_buf(virt_to_page(hdr),
                                  offset_in_page(hdr),
                                  len, &pb[slots_used]);

        packet->rmsg_size = len;
        packet->rmsg_pgcnt = slots_used;

        slots_used += fill_pg_buf(virt_to_page(data),
                                  offset_in_page(data),
                                  skb_headlen(skb), &pb[slots_used]);

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                slots_used += fill_pg_buf(skb_frag_page(frag),
                                          frag->page_offset,
                                          skb_frag_size(frag), &pb[slots_used]);
        }
        return slots_used;
}

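/* Slot accounting for the page buffer array: count_skb_frag_slots()
 * counts the pages spanned by each fragment, and netvsc_get_slots()
 * adds the pages spanned by the linear data.
 */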
static int count_skb_frag_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int pages = 0;

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip the unused pages at the start of the fragment */
                offset &= ~PAGE_MASK;
                pages += PFN_UP(offset + size);
        }
        return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
        char *data = skb->data;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        int slots;
        int frag_slots;

        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        frag_slots = count_skb_frag_slots(skb);
        return slots + frag_slots;
}

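/* Classify the packet as TCP/UDP over IPv4/IPv6 so the transmit path
 * can check it against the host-advertised checksum offload mask.
 */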
static u32 net_checksum_info(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *ip = ip_hdr(skb);

                if (ip->protocol == IPPROTO_TCP)
                        return TRANSPORT_INFO_IPV4_TCP;
                else if (ip->protocol == IPPROTO_UDP)
                        return TRANSPORT_INFO_IPV4_UDP;
        } else {
                struct ipv6hdr *ip6 = ipv6_hdr(skb);

                if (ip6->nexthdr == IPPROTO_TCP)
                        return TRANSPORT_INFO_IPV6_TCP;
                else if (ip6->nexthdr == IPPROTO_UDP)
                        return TRANSPORT_INFO_IPV6_UDP;
        }

        return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
                          struct sk_buff *skb)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        unsigned int len = skb->len;
        int rc;

        skb->dev = vf_netdev;
        skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

        rc = dev_queue_xmit(skb);
        if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
                struct netvsc_vf_pcpu_stats *pcpu_stats
                        = this_cpu_ptr(ndev_ctx->vf_stats);

                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
        }

        return rc;
}

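/* ndo_start_xmit handler. Traffic is redirected to the VF slave when
 * one is present and running; otherwise an RNDIS header (plus optional
 * hash, VLAN, LSO and checksum per-packet info) is built in the skb
 * headroom and the packet is handed to netvsc_send().
 */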
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet = NULL;
        int ret;
        unsigned int num_data_pgs;
        struct rndis_message *rndis_msg;
        struct rndis_packet *rndis_pkt;
        struct net_device *vf_netdev;
        u32 rndis_msg_size;
        struct rndis_per_packet_info *ppi;
        u32 hash;
        struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

        /* If VF is present and up then redirect packets to it.
         * We are already called with rcu_read_lock_bh held.
         */
        vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
        if (vf_netdev && netif_running(vf_netdev) &&
            !netpoll_tx_running(net))
                return netvsc_vf_xmit(net, vf_netdev, skb);

        /* We will need at most two pages to describe the RNDIS
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
         * of pages in a single packet. If skb is scattered around
         * more pages we try linearizing it.
         */

        num_data_pgs = netvsc_get_slots(skb) + 2;

        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
                ++net_device_ctx->eth_stats.tx_scattered;

                if (skb_linearize(skb))
                        goto no_memory;

                num_data_pgs = netvsc_get_slots(skb) + 2;
                if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
                        ++net_device_ctx->eth_stats.tx_too_big;
                        goto drop;
                }
        }

        /*
         * Place the rndis header in the skb head room and
         * the skb->cb will be used for hv_netvsc_packet
         * structure.
         */
        ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
        if (ret)
                goto no_memory;

        /* Use the skb control buffer for building up the packet */
        BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
                     FIELD_SIZEOF(struct sk_buff, cb));
        packet = (struct hv_netvsc_packet *)skb->cb;

        packet->q_idx = skb_get_queue_mapping(skb);

        packet->total_data_buflen = skb->len;
        packet->total_bytes = skb->len;
        packet->total_packets = 1;

        rndis_msg = (struct rndis_message *)skb->head;

        memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

        /* Add the rndis header */
        rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
        rndis_msg->msg_len = packet->total_data_buflen;
        rndis_pkt = &rndis_msg->msg.pkt;
        rndis_pkt->data_offset = sizeof(struct rndis_packet);
        rndis_pkt->data_len = packet->total_data_buflen;
        rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

        rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

        hash = skb_get_hash_raw(skb);
        if (hash != 0 && net->real_num_tx_queues > 1) {
                rndis_msg_size += NDIS_HASH_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
                                    NBL_HASH_VALUE);
                *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
        }

        if (skb_vlan_tag_present(skb)) {
                struct ndis_pkt_8021q_info *vlan;

                rndis_msg_size += NDIS_VLAN_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
                                    IEEE_8021Q_INFO);

                vlan = (void *)ppi + ppi->ppi_offset;
                vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
                vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
                                VLAN_PRIO_SHIFT;
        }

        if (skb_is_gso(skb)) {
                struct ndis_tcp_lso_info *lso_info;

                rndis_msg_size += NDIS_LSO_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
                                    TCP_LARGESEND_PKTINFO);

                lso_info = (void *)ppi + ppi->ppi_offset;

                lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
                if (skb->protocol == htons(ETH_P_IP)) {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
                        ip_hdr(skb)->tot_len = 0;
                        ip_hdr(skb)->check = 0;
                        tcp_hdr(skb)->check =
                                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                } else {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                }
                lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
                lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
                        struct ndis_tcp_ip_checksum_info *csum_info;

                        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
                        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                                            TCPIP_CHKSUM_PKTINFO);

                        csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
                                                                         ppi->ppi_offset);

                        csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

                        if (skb->protocol == htons(ETH_P_IP)) {
                                csum_info->transmit.is_ipv4 = 1;

                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                                        csum_info->transmit.tcp_checksum = 1;
                                else
                                        csum_info->transmit.udp_checksum = 1;
                        } else {
                                csum_info->transmit.is_ipv6 = 1;

                                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                                        csum_info->transmit.tcp_checksum = 1;
                                else
                                        csum_info->transmit.udp_checksum = 1;
                        }
                } else {
                        /* Can't do offload of this type of checksum */
                        if (skb_checksum_help(skb))
                                goto drop;
                }
        }

        /* Start filling in the page buffers with the rndis hdr */
        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
                                               skb, packet, pb);

        /* timestamp packet in software */
        skb_tx_timestamp(skb);

        ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
        if (likely(ret == 0))
                return NETDEV_TX_OK;

        if (ret == -EAGAIN) {
                ++net_device_ctx->eth_stats.tx_busy;
                return NETDEV_TX_BUSY;
        }

        if (ret == -ENOSPC)
                ++net_device_ctx->eth_stats.tx_no_space;

drop:
        dev_kfree_skb_any(skb);
        net->stats.tx_dropped++;

        return NETDEV_TX_OK;

no_memory:
        ++net_device_ctx->eth_stats.tx_no_memory;
        goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                struct rndis_message *resp)
{
        struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_reconfig *event;
        unsigned long flags;

        net = hv_get_drvdata(device_obj);

        if (!net)
                return;

        ndev_ctx = netdev_priv(net);

        /* Update the physical link speed when changing to another vSwitch */
        if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
                u32 speed;

                speed = *(u32 *)((void *)indicate
                                 + indicate->status_buf_offset) / 10000;
                ndev_ctx->speed = speed;
                return;
        }

        /* Handle these link change statuses below */
        if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
            indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
            indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
                return;

        if (net->reg_state != NETREG_REGISTERED)
                return;

        event = kzalloc(sizeof(*event), GFP_ATOMIC);
        if (!event)
                return;
        event->event = indicate->status;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        list_add_tail(&event->list, &ndev_ctx->reconfig_events);
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                                             struct napi_struct *napi,
                                             const struct ndis_tcp_ip_checksum_info *csum_info,
                                             const struct ndis_pkt_8021q_info *vlan,
                                             void *data, u32 buflen)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, buflen);
        if (!skb)
                return skb;

        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated.
         */
        skb_put_data(skb, data, buflen);

        skb->protocol = eth_type_trans(skb, net);

        /* skb is already created with CHECKSUM_NONE */
        skb_checksum_none_assert(skb);

        /*
         * In Linux, the IP checksum is always checked.
         * Do L4 checksum offload if enabled and present.
         */
        if (csum_info && (net->features & NETIF_F_RXCSUM)) {
                if (csum_info->receive.tcp_checksum_succeeded ||
                    csum_info->receive.udp_checksum_succeeded)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        if (vlan) {
                u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tci);
        }

        return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
                         struct vmbus_channel *channel,
                         void *data, u32 len,
                         const struct ndis_tcp_ip_checksum_info *csum_info,
                         const struct ndis_pkt_8021q_info *vlan)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *net_device;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct netvsc_channel *nvchan;
        struct sk_buff *skb;
        struct netvsc_stats *rx_stats;

        if (net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;

        rcu_read_lock();
        net_device = rcu_dereference(net_device_ctx->nvdev);
        if (unlikely(!net_device))
                goto drop;

        nvchan = &net_device->chan_table[q_idx];

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
                                    csum_info, vlan, data, len);
        if (unlikely(!skb)) {
drop:
                ++net->stats.rx_dropped;
                rcu_read_unlock();
                return NVSP_STAT_FAIL;
        }

        skb_record_rx_queue(skb, q_idx);

        /*
         * Even if injecting the packet, record the statistics
         * on the synthetic device because modifying the VF device
         * statistics will not work correctly.
         */
        rx_stats = &nvchan->rx_stats;
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->packets++;
        rx_stats->bytes += len;

        if (skb->pkt_type == PACKET_BROADCAST)
                ++rx_stats->broadcast;
        else if (skb->pkt_type == PACKET_MULTICAST)
                ++rx_stats->multicast;
        u64_stats_update_end(&rx_stats->syncp);

        napi_gro_receive(&nvchan->napi, skb);
        rcu_read_unlock();

        return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

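/* ethtool get_channels handler: report the maximum and current number
 * of combined channels supported by the synthetic device.
 */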
static void netvsc_get_channels(struct net_device *net,
                                struct ethtool_channels *channel)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

        if (nvdev) {
                channel->max_combined = nvdev->max_chn;
                channel->combined_count = nvdev->num_chn;
        }
}

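/* ethtool set_channels handler. The channel count can only be changed
 * by removing and re-adding the RNDIS device with the new queue count;
 * on failure we try to re-add it with the original count.
 */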
static int netvsc_set_channels(struct net_device *net,
                               struct ethtool_channels *channels)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        unsigned int orig, count = channels->combined_count;
        struct netvsc_device_info device_info;
        bool was_opened;
        int ret = 0;

        /* We do not support separate count for rx, tx, or other */
        if (count == 0 ||
            channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
                return -EINVAL;

        if (count > nvdev->max_chn)
                return -EINVAL;

        orig = nvdev->num_chn;
        was_opened = rndis_filter_opened(nvdev);
        if (was_opened)
                rndis_filter_close(nvdev);

        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = count;
        device_info.ring_size = ring_size;
        device_info.send_sections = nvdev->send_section_cnt;
        device_info.send_section_size = nvdev->send_section_size;
        device_info.recv_sections = nvdev->recv_section_cnt;
        device_info.recv_section_size = nvdev->recv_section_size;

        rndis_filter_device_remove(dev, nvdev);

        nvdev = rndis_filter_device_add(dev, &device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);
                device_info.num_chn = orig;
                nvdev = rndis_filter_device_add(dev, &device_info);

                if (IS_ERR(nvdev)) {
                        netdev_err(net, "restoring channel setting failed: %ld\n",
                                   PTR_ERR(nvdev));
                        return ret;
                }
        }

        if (was_opened)
                rndis_filter_open(nvdev);

        /* We may have missed link change notifications */
        net_device_ctx->last_reconfig = 0;
        schedule_delayed_work(&net_device_ctx->dwork, 0);

        return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
        struct ethtool_link_ksettings diff1 = *cmd;
        struct ethtool_link_ksettings diff2 = {};

        diff1.base.speed = 0;
        diff1.base.duplex = 0;
        /* advertising and cmd are usually set */
        ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
        diff1.base.cmd = 0;
        /* We set port to PORT_OTHER */
        diff2.base.port = PORT_OTHER;

        return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
        struct net_device_context *ndc = netdev_priv(dev);

        ndc->udp4_l4_hash = true;
        ndc->udp6_l4_hash = true;

        ndc->speed = SPEED_UNKNOWN;
        ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);

        cmd->base.speed = ndc->speed;
        cmd->base.duplex = ndc->duplex;
        cmd->base.port = PORT_OTHER;

        return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
                                     const struct ethtool_link_ksettings *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);
        u32 speed;

        speed = cmd->base.speed;
        if (!ethtool_validate_speed(speed) ||
            !ethtool_validate_duplex(cmd->base.duplex) ||
            !netvsc_validate_ethtool_ss_cmd(cmd))
                return -EINVAL;

        ndc->speed = speed;
        ndc->duplex = cmd->base.duplex;

        return 0;
}

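/* ndo_change_mtu handler: change the VF slave MTU first (if present),
 * then recreate the RNDIS device with the new MTU, attempting to roll
 * everything back to the original MTU on failure.
 */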
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
        struct hv_device *hdev = ndevctx->device_ctx;
        int orig_mtu = ndev->mtu;
        struct netvsc_device_info device_info;
        bool was_opened;
        int ret = 0;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        /* Change MTU of underlying VF netdev first. */
        if (vf_netdev) {
                ret = dev_set_mtu(vf_netdev, mtu);
                if (ret)
                        return ret;
        }

        netif_device_detach(ndev);
        was_opened = rndis_filter_opened(nvdev);
        if (was_opened)
                rndis_filter_close(nvdev);

        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = nvdev->num_chn;
        device_info.send_sections = nvdev->send_section_cnt;
        device_info.send_section_size = nvdev->send_section_size;
        device_info.recv_sections = nvdev->recv_section_cnt;
        device_info.recv_section_size = nvdev->recv_section_size;

        rndis_filter_device_remove(hdev, nvdev);

        ndev->mtu = mtu;

        nvdev = rndis_filter_device_add(hdev, &device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);

                /* Attempt rollback to original MTU */
                ndev->mtu = orig_mtu;
                nvdev = rndis_filter_device_add(hdev, &device_info);

                if (vf_netdev)
                        dev_set_mtu(vf_netdev, orig_mtu);

                if (IS_ERR(nvdev)) {
                        netdev_err(ndev, "restoring mtu failed: %ld\n",
                                   PTR_ERR(nvdev));
                        return ret;
                }
        }

        if (was_opened)
                rndis_filter_open(nvdev);

        netif_device_attach(ndev);

        /* We may have missed link change notifications */
        schedule_delayed_work(&ndevctx->dwork, 0);

        return ret;
}

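/* Aggregate the per-cpu VF traffic counters into *tot, using the
 * u64_stats sequence counters for a consistent snapshot.
 */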
static void netvsc_get_vf_stats(struct net_device *net,
                                struct netvsc_vf_pcpu_stats *tot)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        int i;

        memset(tot, 0, sizeof(*tot));

        for_each_possible_cpu(i) {
                const struct netvsc_vf_pcpu_stats *stats
                        = per_cpu_ptr(ndev_ctx->vf_stats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        rx_packets = stats->rx_packets;
                        tx_packets = stats->tx_packets;
                        rx_bytes = stats->rx_bytes;
                        tx_bytes = stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
                tot->tx_dropped += stats->tx_dropped;
        }
}

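/* ndo_get_stats64 handler: combine the base netdev counters, the VF
 * totals, and the per-channel tx/rx packet and byte counts.
 */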
static void netvsc_get_stats64(struct net_device *net,
                               struct rtnl_link_stats64 *t)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
        struct netvsc_vf_pcpu_stats vf_tot;
        int i;

        if (!nvdev)
                return;

        netdev_stats_to_stats64(t, &net->stats);

        netvsc_get_vf_stats(net, &vf_tot);
        t->rx_packets += vf_tot.rx_packets;
        t->tx_packets += vf_tot.tx_packets;
        t->rx_bytes += vf_tot.rx_bytes;
        t->tx_bytes += vf_tot.tx_bytes;
        t->tx_dropped += vf_tot.tx_dropped;

        for (i = 0; i < nvdev->num_chn; i++) {
                const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
                const struct netvsc_stats *stats;
                u64 packets, bytes, multicast;
                unsigned int start;

                stats = &nvchan->tx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                t->tx_bytes += bytes;
                t->tx_packets += packets;

                stats = &nvchan->rx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                        multicast = stats->multicast + stats->broadcast;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                t->rx_bytes += bytes;
                t->rx_packets += packets;
                t->multicast += multicast;
        }
}

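/* ndo_set_mac_address handler: apply the address to the VF slave first
 * (when present), then to the synthetic device over RNDIS, rolling the
 * VF back if the RNDIS update fails.
 */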
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
        struct net_device_context *ndc = netdev_priv(ndev);
        struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
        struct sockaddr *addr = p;
        int err;

        err = eth_prepare_mac_addr_change(ndev, p);
        if (err)
                return err;

        if (!nvdev)
                return -ENODEV;

        if (vf_netdev) {
                err = dev_set_mac_address(vf_netdev, addr);
                if (err)
                        return err;
        }

        err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
        if (!err) {
                eth_commit_mac_addr_change(ndev, p);
        } else if (vf_netdev) {
                /* rollback change on VF */
                memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
                dev_set_mac_address(vf_netdev, addr);
        }

        return err;
}

static const struct {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} netvsc_stats[] = {
        { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
        { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
        { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
        { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
        { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
        { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
        { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
}, vf_stats[] = {
        { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
        { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
        { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
        { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
        { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN     ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

        if (!nvdev)
                return -ENODEV;

        switch (string_set) {
        case ETH_SS_STATS:
                return NETVSC_GLOBAL_STATS_LEN
                        + NETVSC_VF_STATS_LEN
                        + NETVSC_QUEUE_STATS_LEN(nvdev);
        default:
                return -EINVAL;
        }
}

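/* ethtool get_ethtool_stats handler. Values are emitted in the same
 * order as netvsc_get_strings() below: global counters, VF counters,
 * then per-queue tx/rx packets and bytes.
 */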
static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
        const void *nds = &ndc->eth_stats;
        const struct netvsc_stats *qstats;
        struct netvsc_vf_pcpu_stats sum;
        unsigned int start;
        u64 packets, bytes;
        int i, j;

        if (!nvdev)
                return;

        for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
                data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

        netvsc_get_vf_stats(dev, &sum);
        for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
                data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

        for (j = 0; j < nvdev->num_chn; j++) {
                qstats = &nvdev->chan_table[j].tx_stats;

                do {
                        start = u64_stats_fetch_begin_irq(&qstats->syncp);
                        packets = qstats->packets;
                        bytes = qstats->bytes;
                } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
                data[i++] = packets;
                data[i++] = bytes;

                qstats = &nvdev->chan_table[j].rx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&qstats->syncp);
                        packets = qstats->packets;
                        bytes = qstats->bytes;
                } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
                data[i++] = packets;
                data[i++] = bytes;
        }
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}
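
/* Illustrative only ("eth0" is an assumed interface name, the values are
 * made up): the strings above surface through the standard stats command:
 *
 *   $ ethtool -S eth0
 *   NIC statistics:
 *        ...global counters from netvsc_stats[]...
 *        vf_tx_packets: 0
 *        vf_tx_bytes: 0
 *        ...
 *        tx_queue_0_packets: 4123
 *        tx_queue_0_bytes: 590211
 *        rx_queue_0_packets: 3310
 *        rx_queue_0_bytes: 247019
 */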

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;

	case UDP_V4_FLOW:
		if (ndc->udp4_l4_hash)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

		break;

	case UDP_V6_FLOW:
		if (ndc->udp6_l4_hash)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}
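
/* Illustrative read-back ("eth0" assumed; the output shape approximates
 * the ethtool CLI): RXH_IP_SRC|RXH_IP_DST reports as IP SA/DA, and the
 * RXH_L4_B_* bits as the L4 port bytes, e.g.
 *
 *   $ ethtool -n eth0 rx-flow-hash tcp4
 *   TCP over IPV4 flows use these fields for computing Hash flow key:
 *   IP SA
 *   IP DA
 *   L4 bytes 0 & 1 [TCP/UDP src port]
 *   L4 bytes 2 & 3 [TCP/UDP dst port]
 */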

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		if (info->flow_type == UDP_V4_FLOW)
			ndc->udp4_l4_hash = true;
		else if (info->flow_type == UDP_V6_FLOW)
			ndc->udp6_l4_hash = true;
		else
			return -EOPNOTSUPP;

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		if (info->flow_type == UDP_V4_FLOW)
			ndc->udp4_l4_hash = false;
		else if (info->flow_type == UDP_V6_FLOW)
			ndc->udp6_l4_hash = false;
		else
			return -EOPNOTSUPP;

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}
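
/* Illustrative usage ("eth0" assumed): UDP 4-tuple hashing is the only
 * tunable accepted above. In ethtool CLI letters, "sdfn" maps to
 * RXH_IP_SRC|RXH_IP_DST|RXH_L4_B_0_1|RXH_L4_B_2_3 and "sd" to just the
 * address bits:
 *
 *   $ ethtool -N eth0 rx-flow-hash udp4 sdfn   # hash UDP/IPv4 on addrs+ports
 *   $ ethtool -N eth0 rx-flow-hash udp6 sd     # hash UDP/IPv6 on addrs only
 */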

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev;
	int i;

	rcu_read_lock();
	ndev = rcu_dereference(ndc->nvdev);
	if (ndev) {
		for (i = 0; i < ndev->num_chn; i++) {
			struct netvsc_channel *nvchan = &ndev->chan_table[i];

			napi_schedule(&nvchan->napi);
		}
	}
	rcu_read_unlock();
}
#endif
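
/* netvsc_poll_controller() backs netpoll users such as netconsole: when
 * interrupt-driven receive cannot run, scheduling NAPI on every channel
 * lets pending completions and receives be drained by polling.
 */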

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->ind_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->ind_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}
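
/* Illustrative usage ("eth0" assumed): the Toeplitz key and indirection
 * table above are reachable through the generic ethtool RSS commands:
 *
 *   $ ethtool -x eth0              # dump indirection table and hash key
 *   $ ethtool -X eth0 equal 2      # spread the table across queues 0-1
 *
 * Any indirection entry pointing at a queue >= num_chn is rejected with
 * -EINVAL by netvsc_set_rxfh().
 */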

/* The Hyper-V RNDIS protocol does not have a ring in the hardware sense;
 * it has pre-allocated send and receive buffers that are divided into
 * fixed-size sections, so the "ring size" reported here is really the
 * section count of those buffers.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.ring_size = ring_size;
	device_info.send_sections = new_tx;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = new_rx;
	device_info.recv_section_size = nvdev->recv_section_size;

	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	rndis_filter_device_remove(hdev, nvdev);

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		device_info.send_sections = orig.tx_pending;
		device_info.recv_sections = orig.rx_pending;
		nvdev = rndis_filter_device_add(hdev, &device_info);
		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring ringparam failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);
	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	ndevctx->last_reconfig = 0;
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}
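
/* Illustrative usage ("eth0" assumed; the numbers are only an example).
 * Since the "ring" is really the section count of the pre-allocated
 * buffers, a resize tears the RNDIS device down and re-adds it:
 *
 *   $ ethtool -g eth0                 # current and max section counts
 *   $ ethtool -G eth0 rx 2048 tx 128  # request new counts (clamped above)
 */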

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = netvsc_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings = netvsc_get_strings,
	.get_channels = netvsc_get_channels,
	.set_channels = netvsc_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_rxnfc = netvsc_get_rxnfc,
	.set_rxnfc = netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh = netvsc_get_rxfh,
	.set_rxfh = netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam = netvsc_get_ringparam,
	.set_ringparam = netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open = netvsc_open,
	.ndo_stop = netvsc_close,
	.ndo_start_xmit = netvsc_start_xmit,
	.ndo_set_rx_mode = netvsc_set_multicast_list,
	.ndo_change_mtu = netvsc_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = netvsc_set_mac_addr,
	.ndo_select_queue = netvsc_select_queue,
	.ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE, emulate a
 * link down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT while the carrier
 * is already present, send a gratuitous ARP to network peers with
 * netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with the current
		 * state per second; avoid reconfiguring more often than
		 * that. Handle wrap-around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with the current state per
	 * second; handle the next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (!rtnl_dereference(net_device_ctx->nvdev))
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

/* Called when the VF injects a packet into the network stack.
 * Change the device associated with the skb from the VF to the
 * synthetic (netvsc) device.
 * Note: already called with rcu_read_lock held.
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}
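
/* RX_HANDLER_ANOTHER tells the core to restart receive processing for the
 * skb on its new device (skb->dev was just switched to the synthetic NIC),
 * so VF traffic is delivered and accounted as if it arrived on netvsc.
 */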

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_upper_dev_link(vf_netdev, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}
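
/* Note that netvsc_vf_join() does not configure the VF itself; it only
 * schedules ndev_ctx->vf_takeover. The VF_TAKEOVER_INT delay presumably
 * gives the VF time to finish initializing before __netvsc_vf_setup()
 * aligns its MTU and opens it.
 */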

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Set up the VF as a slave of the synthetic device.
 * Runs in a workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* A VF interface went up or down; switch the data path accordingly */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for the rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to 1; it may be changed if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	device_info.send_sections = NETVSC_DEFAULT_TX;
	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
	device_info.recv_sections = NETVSC_DEFAULT_RX;
	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
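	/* Initial defaults only: once the device is registered, the section
	 * counts can be resized via netvsc_set_ringparam() (ethtool -G) and
	 * the channel count via netvsc_set_channels() (ethtool -L).
	 */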

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	return ret;

register_failed:
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev;
	struct net_device *net;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	netif_device_detach(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	/*
	 * Call into the vsc driver to let it know that the device is being
	 * removed. Holding the rtnl lock here also blocks MTU and channel
	 * changes.
	 */
	rtnl_lock();
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	unregister_netdevice(net);

	rndis_filter_device_remove(dev,
				   rtnl_dereference(ndev_ctx->nvdev));
	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only netvsc driver instance */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);