/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
                                NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

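/* Called when the net device's rx filter (e.g. the multicast list)
 * changes; pushes the updated filter state down to the RNDIS layer.
 */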
static void netvsc_set_multicast_list(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

        rndis_filter_update(nvdev);
}

static int netvsc_open(struct net_device *net)
{
        struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
        struct rndis_device *rdev;
        int ret = 0;

        netif_carrier_off(net);

        /* Open up the device */
        ret = rndis_filter_open(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to open device (ret %d).\n", ret);
                return ret;
        }

        netif_tx_wake_all_queues(net);

        rdev = nvdev->extension;
        if (!rdev->link_state)
                netif_carrier_on(net);

        return ret;
}

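/* Stop transmission, close the RNDIS filter, then wait (with exponential
 * backoff) for the VMBus ring buffers of every channel to drain before
 * reporting the device as closed.
 */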
static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
        u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;

        netif_tx_disable(net);

        ret = rndis_filter_close(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
                return ret;
        }

        /* Ensure pending bytes in ring are read */
        while (true) {
                aread = 0;
                for (i = 0; i < nvdev->num_chn; i++) {
                        chn = nvdev->chan_table[i].channel;
                        if (!chn)
                                continue;

                        hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
                                                     &awrite);

                        if (aread)
                                break;

                        hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
                                                     &awrite);

                        if (aread)
                                break;
                }

                retry++;
                if (retry > retry_max || aread == 0)
                        break;

                msleep(msec);

                if (msec < 1000)
                        msec *= 2;
        }

        if (aread) {
                netdev_err(net, "Ring buffer not empty after closing rndis\n");
                ret = -ETIMEDOUT;
        }

        return ret;
}

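/* Append a per-packet info (PPI) record of the given size and type to the
 * RNDIS packet message and return a pointer to the new record so the
 * caller can fill in its payload.
 */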
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
                           int pkt_type)
{
        struct rndis_packet *rndis_pkt;
        struct rndis_per_packet_info *ppi;

        rndis_pkt = &msg->msg.pkt;
        rndis_pkt->data_offset += ppi_size;

        ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
                rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

        ppi->size = ppi_size;
        ppi->type = pkt_type;
        ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

        rndis_pkt->per_pkt_info_len += ppi_size;

        return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute
 * hash for non-TCP traffic with only IP numbers.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
{
        struct flow_keys flow;
        u32 hash;
        static u32 hashrnd __read_mostly;

        net_get_random_once(&hashrnd, sizeof(hashrnd));

        if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
                return 0;

        if (flow.basic.ip_proto == IPPROTO_TCP) {
                return skb_get_hash(skb);
        } else {
                if (flow.basic.n_proto == htons(ETH_P_IP))
                        hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
                else if (flow.basic.n_proto == htons(ETH_P_IPV6))
                        hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
                else
                        hash = 0;

                skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
        }

        return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
                                      struct sk_buff *skb, int old_idx)
{
        const struct net_device_context *ndc = netdev_priv(ndev);
        struct sock *sk = skb->sk;
        int q_idx;

        q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
                                   (VRSS_SEND_TAB_SIZE - 1)];

        /* If queue index changed record the new value */
        if (q_idx != old_idx &&
            sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
                sk_tx_queue_set(sk, q_idx);

        return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                               void *accel_priv, select_queue_fallback_t fallback)
{
        unsigned int num_tx_queues = ndev->real_num_tx_queues;
        int q_idx = sk_tx_queue_get(skb->sk);

        if (q_idx < 0 || skb->ooo_okay) {
                /* If forwarding a packet, we use the recorded queue when
                 * available for better cache locality.
                 */
                if (skb_rx_queue_recorded(skb))
                        q_idx = skb_get_rx_queue(skb);
                else
                        q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
        }

        while (unlikely(q_idx >= num_tx_queues))
                q_idx -= num_tx_queues;

        return q_idx;
}

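/* Describe a buffer (which may span several pages) as a list of
 * pfn/offset/length page-buffer entries for the host; returns the number
 * of entries written.
 */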
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
                        struct hv_page_buffer *pb)
{
        int j = 0;

        /* Deal with compound pages by ignoring unused part
         * of the page.
         */
        page += (offset >> PAGE_SHIFT);
        offset &= ~PAGE_MASK;

        while (len > 0) {
                unsigned long bytes;

                bytes = PAGE_SIZE - offset;
                if (bytes > len)
                        bytes = len;
                pb[j].pfn = page_to_pfn(page);
                pb[j].offset = offset;
                pb[j].len = bytes;

                offset += bytes;
                len -= bytes;

                if (offset == PAGE_SIZE && len) {
                        page++;
                        offset = 0;
                        j++;
                }
        }

        return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                           struct hv_netvsc_packet *packet,
                           struct hv_page_buffer **page_buf)
{
        struct hv_page_buffer *pb = *page_buf;
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
         * 1. hdr: RNDIS header and PPI
         * 2. skb linear data
         * 3. skb fragment data
         */
        if (hdr != NULL)
                slots_used += fill_pg_buf(virt_to_page(hdr),
                                          offset_in_page(hdr),
                                          len, &pb[slots_used]);

        packet->rmsg_size = len;
        packet->rmsg_pgcnt = slots_used;

        slots_used += fill_pg_buf(virt_to_page(data),
                                  offset_in_page(data),
                                  skb_headlen(skb), &pb[slots_used]);

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                slots_used += fill_pg_buf(skb_frag_page(frag),
                                          frag->page_offset,
                                          skb_frag_size(frag), &pb[slots_used]);
        }
        return slots_used;
}

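/* Count how many page-buffer slots the skb's paged fragments need; used
 * by netvsc_get_slots() below to size the full descriptor list.
 */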
static int count_skb_frag_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int pages = 0;

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
                pages += PFN_UP(offset + size);
        }
        return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
        char *data = skb->data;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        int slots;
        int frag_slots;

        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        frag_slots = count_skb_frag_slots(skb);
        return slots + frag_slots;
}

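/* Classify the packet's L3/L4 protocol combination so the caller can
 * check it against the device's supported checksum-offload mask.
 */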
static u32 net_checksum_info(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *ip = ip_hdr(skb);

                if (ip->protocol == IPPROTO_TCP)
                        return TRANSPORT_INFO_IPV4_TCP;
                else if (ip->protocol == IPPROTO_UDP)
                        return TRANSPORT_INFO_IPV4_UDP;
        } else {
                struct ipv6hdr *ip6 = ipv6_hdr(skb);

                if (ip6->nexthdr == IPPROTO_TCP)
                        return TRANSPORT_INFO_IPV6_TCP;
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        return TRANSPORT_INFO_IPV6_UDP;
        }

        return TRANSPORT_INFO_NOT_IP;
}

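/* Transmit path: build an RNDIS packet message (plus any hash, VLAN, LSO
 * or checksum per-packet info) in the skb headroom, describe it as a page
 * buffer list, and hand it to the netvsc layer for sending to the host.
 */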
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet = NULL;
        int ret;
        unsigned int num_data_pgs;
        struct rndis_message *rndis_msg;
        struct rndis_packet *rndis_pkt;
        u32 rndis_msg_size;
        struct rndis_per_packet_info *ppi;
        u32 hash;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
        struct hv_page_buffer *pb = page_buf;

        /* We will at most need two pages to describe the rndis
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
         * of pages in a single packet. If skb is scattered around
         * more pages we try linearizing it.
         */

        num_data_pgs = netvsc_get_slots(skb) + 2;

        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
                ++net_device_ctx->eth_stats.tx_scattered;

                if (skb_linearize(skb))
                        goto no_memory;

                num_data_pgs = netvsc_get_slots(skb) + 2;
                if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
                        ++net_device_ctx->eth_stats.tx_too_big;
                        goto drop;
                }
        }

        /*
         * Place the rndis header in the skb head room and
         * the skb->cb will be used for hv_netvsc_packet
         * structure.
         */
        ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
        if (ret)
                goto no_memory;

        /* Use the skb control buffer for building up the packet */
        BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
                        FIELD_SIZEOF(struct sk_buff, cb));
        packet = (struct hv_netvsc_packet *)skb->cb;

        packet->q_idx = skb_get_queue_mapping(skb);

        packet->total_data_buflen = skb->len;
        packet->total_bytes = skb->len;
        packet->total_packets = 1;

        rndis_msg = (struct rndis_message *)skb->head;

        memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

        /* Add the rndis header */
        rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
        rndis_msg->msg_len = packet->total_data_buflen;
        rndis_pkt = &rndis_msg->msg.pkt;
        rndis_pkt->data_offset = sizeof(struct rndis_packet);
        rndis_pkt->data_len = packet->total_data_buflen;
        rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

        rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

        hash = skb_get_hash_raw(skb);
        if (hash != 0 && net->real_num_tx_queues > 1) {
                rndis_msg_size += NDIS_HASH_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
                                    NBL_HASH_VALUE);
                *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
        }

        if (skb_vlan_tag_present(skb)) {
                struct ndis_pkt_8021q_info *vlan;

                rndis_msg_size += NDIS_VLAN_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
                                    IEEE_8021Q_INFO);
                vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
                                                      ppi->ppi_offset);
                vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
                vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
                                VLAN_PRIO_SHIFT;
        }

        if (skb_is_gso(skb)) {
                struct ndis_tcp_lso_info *lso_info;

                rndis_msg_size += NDIS_LSO_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
                                    TCP_LARGESEND_PKTINFO);

                lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
                                                        ppi->ppi_offset);

                lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
                if (skb->protocol == htons(ETH_P_IP)) {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
                        ip_hdr(skb)->tot_len = 0;
                        ip_hdr(skb)->check = 0;
                        tcp_hdr(skb)->check =
                                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                } else {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                }
                lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
                lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
                        struct ndis_tcp_ip_checksum_info *csum_info;

                        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
                        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                                            TCPIP_CHKSUM_PKTINFO);

                        csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
                                                                         ppi->ppi_offset);

                        csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

                        if (skb->protocol == htons(ETH_P_IP)) {
                                csum_info->transmit.is_ipv4 = 1;

                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                                        csum_info->transmit.tcp_checksum = 1;
                                else
                                        csum_info->transmit.udp_checksum = 1;
                        } else {
                                csum_info->transmit.is_ipv6 = 1;

                                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                                        csum_info->transmit.tcp_checksum = 1;
                                else
                                        csum_info->transmit.udp_checksum = 1;
                        }
                } else {
                        /* Can't do offload of this type of checksum */
                        if (skb_checksum_help(skb))
                                goto drop;
                }
        }

        /* Start filling in the page buffers with the rndis hdr */
        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
                                               skb, packet, &pb);

        /* timestamp packet in software */
        skb_tx_timestamp(skb);
        ret = netvsc_send(net_device_ctx->device_ctx, packet,
                          rndis_msg, &pb, skb);
        if (likely(ret == 0))
                return NETDEV_TX_OK;

        if (ret == -EAGAIN) {
                ++net_device_ctx->eth_stats.tx_busy;
                return NETDEV_TX_BUSY;
        }

        if (ret == -ENOSPC)
                ++net_device_ctx->eth_stats.tx_no_space;

drop:
        dev_kfree_skb_any(skb);
        net->stats.tx_dropped++;

        return NETDEV_TX_OK;

no_memory:
        ++net_device_ctx->eth_stats.tx_no_memory;
        goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                struct rndis_message *resp)
{
        struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_reconfig *event;
        unsigned long flags;

        net = hv_get_drvdata(device_obj);

        if (!net)
                return;

        ndev_ctx = netdev_priv(net);

        /* Update the physical link speed when changing to another vSwitch */
        if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
                u32 speed;

                speed = *(u32 *)((void *)indicate + indicate->
                                 status_buf_offset) / 10000;
                ndev_ctx->speed = speed;
                return;
        }

        /* Handle these link change statuses below */
        if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
            indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
            indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
                return;

        if (net->reg_state != NETREG_REGISTERED)
                return;

        event = kzalloc(sizeof(*event), GFP_ATOMIC);
        if (!event)
                return;
        event->event = indicate->status;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        list_add_tail(&event->list, &ndev_ctx->reconfig_events);
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        schedule_delayed_work(&ndev_ctx->dwork, 0);
}

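/* Build an skb for a received packet: copy the data out of the host
 * buffer and propagate checksum and VLAN information.
 */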
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                                             struct napi_struct *napi,
                                             const struct ndis_tcp_ip_checksum_info *csum_info,
                                             const struct ndis_pkt_8021q_info *vlan,
                                             void *data, u32 buflen)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, buflen);
        if (!skb)
                return skb;

        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated
         */
        memcpy(skb_put(skb, buflen), data, buflen);

        skb->protocol = eth_type_trans(skb, net);

        /* skb is already created with CHECKSUM_NONE */
        skb_checksum_none_assert(skb);

        /*
         * In Linux, the IP checksum is always checked.
         * Do L4 checksum offload if enabled and present.
         */
        if (csum_info && (net->features & NETIF_F_RXCSUM)) {
                if (csum_info->receive.tcp_checksum_succeeded ||
                    csum_info->receive.udp_checksum_succeeded)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        if (vlan) {
                u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tci);
        }

        return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
                         struct vmbus_channel *channel,
                         void *data, u32 len,
                         const struct ndis_tcp_ip_checksum_info *csum_info,
                         const struct ndis_pkt_8021q_info *vlan)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *net_device;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct netvsc_channel *nvchan;
        struct net_device *vf_netdev;
        struct sk_buff *skb;
        struct netvsc_stats *rx_stats;

        if (net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;

        /*
         * If necessary, inject this packet into the VF interface.
         * On Hyper-V, multicast and broadcast packets are only delivered
         * to the synthetic interface (after subjecting these to
         * policy filters on the host). Deliver these via the VF
         * interface in the guest.
         */
        rcu_read_lock();
        net_device = rcu_dereference(net_device_ctx->nvdev);
        if (unlikely(!net_device))
                goto drop;

        nvchan = &net_device->chan_table[q_idx];
        vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
        if (vf_netdev && (vf_netdev->flags & IFF_UP))
                net = vf_netdev;

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
                                    csum_info, vlan, data, len);
        if (unlikely(!skb)) {
drop:
                ++net->stats.rx_dropped;
                rcu_read_unlock();
                return NVSP_STAT_FAIL;
        }

        if (net != vf_netdev)
                skb_record_rx_queue(skb, q_idx);

        /*
         * Even if injecting the packet, record the statistics
         * on the synthetic device because modifying the VF device
         * statistics will not work correctly.
         */
        rx_stats = &nvchan->rx_stats;
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->packets++;
        rx_stats->bytes += len;

        if (skb->pkt_type == PACKET_BROADCAST)
                ++rx_stats->broadcast;
        else if (skb->pkt_type == PACKET_MULTICAST)
                ++rx_stats->multicast;
        u64_stats_update_end(&rx_stats->syncp);

        napi_gro_receive(&nvchan->napi, skb);
        rcu_read_unlock();

        return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
                                struct ethtool_channels *channel)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

        if (nvdev) {
                channel->max_combined = nvdev->max_chn;
                channel->combined_count = nvdev->num_chn;
        }
}

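/* Re-create the RNDIS device with the requested channel count and resize
 * the net device's real tx/rx queue counts to match.
 */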
static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
                             u32 num_chn)
{
        struct netvsc_device_info device_info;
        int ret;

        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = num_chn;
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = num_chn;

        ret = rndis_filter_device_add(dev, &device_info);
        if (ret)
                return ret;

        ret = netif_set_real_num_tx_queues(net, num_chn);
        if (ret)
                return ret;

        ret = netif_set_real_num_rx_queues(net, num_chn);

        return ret;
}

static int netvsc_set_channels(struct net_device *net,
                               struct ethtool_channels *channels)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        unsigned int count = channels->combined_count;
        bool was_running;
        int ret;

        /* We do not support separate count for rx, tx, or other */
        if (count == 0 ||
            channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;

        if (count > net->num_tx_queues || count > net->num_rx_queues)
                return -EINVAL;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
                return -EINVAL;

        if (count > nvdev->max_chn)
                return -EINVAL;

        was_running = netif_running(net);
        if (was_running) {
                ret = netvsc_close(net);
                if (ret)
                        return ret;
        }

        rndis_filter_device_remove(dev, nvdev);

        ret = netvsc_set_queues(net, dev, count);
        if (ret == 0)
                nvdev->num_chn = count;
        else
                netvsc_set_queues(net, dev, nvdev->num_chn);

        if (was_running)
                ret = netvsc_open(net);

        /* We may have missed link change notifications */
        schedule_delayed_work(&net_device_ctx->dwork, 0);

        return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
        struct ethtool_link_ksettings diff1 = *cmd;
        struct ethtool_link_ksettings diff2 = {};

        diff1.base.speed = 0;
        diff1.base.duplex = 0;
        /* advertising and cmd are usually set */
        ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
        diff1.base.cmd = 0;
        /* We set port to PORT_OTHER */
        diff2.base.port = PORT_OTHER;

        return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
        struct net_device_context *ndc = netdev_priv(dev);

        ndc->speed = SPEED_UNKNOWN;
        ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);

        cmd->base.speed = ndc->speed;
        cmd->base.duplex = ndc->duplex;
        cmd->base.port = PORT_OTHER;

        return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
                                     const struct ethtool_link_ksettings *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);
        u32 speed;

        speed = cmd->base.speed;
        if (!ethtool_validate_speed(speed) ||
            !ethtool_validate_duplex(cmd->base.duplex) ||
            !netvsc_validate_ethtool_ss_cmd(cmd))
                return -EINVAL;

        ndc->speed = speed;
        ndc->duplex = cmd->base.duplex;

        return 0;
}

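/* Changing the MTU requires tearing down the RNDIS device and re-adding
 * it with the new settings; the interface is closed around the operation
 * if it was running.
 */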
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
        struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
        bool was_running;
        int ret = 0;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        was_running = netif_running(ndev);
        if (was_running) {
                ret = netvsc_close(ndev);
                if (ret)
                        return ret;
        }

        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = nvdev->num_chn;
        device_info.max_num_vrss_chns = nvdev->num_chn;

        rndis_filter_device_remove(hdev, nvdev);

        /* 'nvdev' has been freed in rndis_filter_device_remove() ->
         * netvsc_device_remove () -> free_netvsc_device().
         * We mustn't access it before it's re-created in
         * rndis_filter_device_add() -> netvsc_device_add().
         */

        ndev->mtu = mtu;

        rndis_filter_device_add(hdev, &device_info);

        if (was_running)
                ret = netvsc_open(ndev);

        /* We may have missed link change notifications */
        schedule_delayed_work(&ndevctx->dwork, 0);

        return ret;
}

static void netvsc_get_stats64(struct net_device *net,
                               struct rtnl_link_stats64 *t)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
        int i;

        if (!nvdev)
                return;

        for (i = 0; i < nvdev->num_chn; i++) {
                const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
                const struct netvsc_stats *stats;
                u64 packets, bytes, multicast;
                unsigned int start;

                stats = &nvchan->tx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                t->tx_bytes += bytes;
                t->tx_packets += packets;

                stats = &nvchan->rx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                        multicast = stats->multicast + stats->broadcast;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                t->rx_bytes += bytes;
                t->rx_packets += packets;
                t->multicast += multicast;
        }

        t->tx_dropped = net->stats.tx_dropped;
        t->tx_errors = net->stats.tx_errors;

        t->rx_dropped = net->stats.rx_dropped;
        t->rx_errors = net->stats.rx_errors;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;
        char save_adr[ETH_ALEN];
        unsigned char save_aatype;
        int err;

        memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
        save_aatype = ndev->addr_assign_type;

        err = eth_mac_addr(ndev, p);
        if (err != 0)
                return err;

        err = rndis_filter_set_device_mac(ndev, addr->sa_data);
        if (err != 0) {
                /* roll back to saved MAC */
                memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
                ndev->addr_assign_type = save_aatype;
        }

        return err;
}

static const struct {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} netvsc_stats[] = {
        { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
        { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
        { "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
        { "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
        { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

        if (!nvdev)
                return -ENODEV;

        switch (string_set) {
        case ETH_SS_STATS:
                return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
        default:
                return -EINVAL;
        }
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
        const void *nds = &ndc->eth_stats;
        const struct netvsc_stats *qstats;
        unsigned int start;
        u64 packets, bytes;
        int i, j;

        if (!nvdev)
                return;

        for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
                data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

        for (j = 0; j < nvdev->num_chn; j++) {
                qstats = &nvdev->chan_table[j].tx_stats;

                do {
                        start = u64_stats_fetch_begin_irq(&qstats->syncp);
                        packets = qstats->packets;
                        bytes = qstats->bytes;
                } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
                data[i++] = packets;
                data[i++] = bytes;

                qstats = &nvdev->chan_table[j].rx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&qstats->syncp);
                        packets = qstats->packets;
                        bytes = qstats->bytes;
                } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
                data[i++] = packets;
                data[i++] = bytes;
        }
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
        u8 *p = data;
        int i;

        if (!nvdev)
                return;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
                        memcpy(p + i * ETH_GSTRING_LEN,
                               netvsc_stats[i].name, ETH_GSTRING_LEN);

                p += i * ETH_GSTRING_LEN;
                for (i = 0; i < nvdev->num_chn; i++) {
                        sprintf(p, "tx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }

                break;
        }
}

static int
netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
                         struct ethtool_rxnfc *info)
{
        info->data = RXH_IP_SRC | RXH_IP_DST;

        switch (info->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fallthrough */
        case UDP_V4_FLOW:
        case UDP_V6_FLOW:
        case IPV4_FLOW:
        case IPV6_FLOW:
                break;
        default:
                info->data = 0;
                break;
        }

        return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                 u32 *rules)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);

        if (!nvdev)
                return -ENODEV;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = nvdev->num_chn;
                return 0;

        case ETHTOOL_GRXFH:
                return netvsc_get_rss_hash_opts(nvdev, info);
        }
        return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *ndev;
        int i;

        rcu_read_lock();
        ndev = rcu_dereference(ndc->nvdev);
        if (ndev) {
                for (i = 0; i < ndev->num_chn; i++) {
                        struct netvsc_channel *nvchan = &ndev->chan_table[i];

                        napi_schedule(&nvchan->napi);
                }
        }
        rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
        return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
        return ITAB_NUM;
}

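/* Report (and below, update) the RSS indirection table and hash key kept
 * by the RNDIS extension; only the Toeplitz hash function is supported.
 */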
static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
                           u8 *hfunc)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
        struct rndis_device *rndis_dev;
        int i;

        if (!ndev)
                return -ENODEV;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;      /* Toeplitz */

        rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
                        indir[i] = rndis_dev->ind_table[i];
        }

        if (key)
                memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

        return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
                           const u8 *key, const u8 hfunc)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
        struct rndis_device *rndis_dev;
        int i;

        if (!ndev)
                return -ENODEV;

        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;

        rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
                        if (indir[i] >= dev->num_rx_queues)
                                return -EINVAL;

                for (i = 0; i < ITAB_NUM; i++)
                        rndis_dev->ind_table[i] = indir[i];
        }

        if (!key) {
                if (!indir)
                        return 0;

                key = rndis_dev->rss_key;
        }

        return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = netvsc_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ethtool_stats = netvsc_get_ethtool_stats,
        .get_sset_count = netvsc_get_sset_count,
        .get_strings = netvsc_get_strings,
        .get_channels = netvsc_get_channels,
        .set_channels = netvsc_set_channels,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_rxnfc = netvsc_get_rxnfc,
        .get_rxfh_key_size = netvsc_get_rxfh_key_size,
        .get_rxfh_indir_size = netvsc_rss_indir_size,
        .get_rxfh = netvsc_get_rxfh,
        .set_rxfh = netvsc_set_rxfh,
        .get_link_ksettings = netvsc_get_link_ksettings,
        .set_link_ksettings = netvsc_set_link_ksettings,
};

static const struct net_device_ops device_ops = {
        .ndo_open = netvsc_open,
        .ndo_stop = netvsc_close,
        .ndo_start_xmit = netvsc_start_xmit,
        .ndo_set_rx_mode = netvsc_set_multicast_list,
        .ndo_change_mtu = netvsc_change_mtu,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_mac_address = netvsc_set_mac_addr,
        .ndo_select_queue = netvsc_select_queue,
        .ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = netvsc_poll_controller,
#endif
};

Haiyang Zhangc996edc2011-04-06 15:18:00 -07001256/*
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001257 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
1258 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
1259 * present send GARP packet to network peers with netif_notify_peers().
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001260 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

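/*
 * Find the synthetic (netvsc) device whose permanent MAC address matches
 * @mac. On Hyper-V a VF is paired with its synthetic counterpart by MAC
 * address, so this is how a newly registered VF is matched up. Must be
 * called under RTNL.
 */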
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

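/*
 * Find the synthetic device that has already claimed @vf_netdev as its VF,
 * i.e. whose vf_netdev pointer refers to it. Must be called under RTNL.
 */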
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (net_device_ctx->nvdev == NULL)
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

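/*
 * VF lifecycle handling. The netdevice notifier near the bottom of this
 * file dispatches NETDEV_REGISTER/UP/DOWN/UNREGISTER events for the VF to
 * the helpers below: register pairs the VF with its synthetic device,
 * up/down switch the data path to and from the VF, and unregister drops
 * the pairing again.
 */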
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	/* Take a reference on the module. */
	try_module_get(THIS_MODULE);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

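/*
 * The VF interface came up: open the synthetic device's RNDIS filter and
 * ask the host to steer data traffic through the VF, then let peers know
 * via the VF device. netvsc_vf_down() below performs the mirror-image
 * sequence when the VF goes down again.
 */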
static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);

	/* Open the device before switching data path. */
	rndis_filter_open(netvsc_dev);

	/* Notify the host to switch the data path. */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}

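/*
 * Probe a new VMBus network device instance: allocate the net_device,
 * bring up the RNDIS/NVSP layer via rndis_filter_device_add() (which fills
 * in the MAC address and hw_features), size the transmit and receive
 * queues from the resulting channel count, and register the netdev.
 */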
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	/* RCU not necessary here, device not registered */
	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev, nvdev);
		free_netdev(net);
	}

	return ret;
}

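/*
 * Tear-down path: detach the interface, stop the link-change work, remove
 * the RNDIS device under RTNL (which also blocks concurrent MTU and channel
 * changes), then unregister and free the netdev.
 */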
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	netif_device_detach(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	rtnl_lock();
	rndis_filter_device_remove(dev, ndev_ctx->nvdev);
	rtnl_unlock();

	unregister_netdev(net);

	hv_set_drvdata(dev, NULL);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

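/*
 * Illustrative usage only (the module name and value below are assumptions,
 * not taken from this file): the ring_size parameter clamped above can be
 * set at load time, e.g.
 *
 *	modprobe hv_netvsc ring_size=256
 *
 * Values below RING_SIZE_MIN are raised to the minimum and a message is
 * logged.
 */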
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);