/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

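/* Select the transmit queue for an outgoing skb: hash the flow and look the
 * hash up in the send indirection table provided by the host, falling back
 * to queue 0 when the chosen channel is not (yet) open.
 */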
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}

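/* Describe a virtually contiguous buffer as a list of page/offset/length
 * entries (struct hv_page_buffer) that can be handed to the VMBus channel;
 * returns the number of entries used.
 */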
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused space at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

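/* Estimate how many page buffer slots the skb (linear part plus fragments)
 * will occupy when mapped for the host.
 */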
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

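/* Classify the packet's L4 protocol so the transmit path can check it
 * against net_device_ctx->tx_checksum_mask before requesting checksum
 * offload from the host.
 */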
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0)) {
		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
		return NETDEV_TX_OK;
	}

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate + indicate->
				 status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

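/* Build an skb for a received packet by copying the data out of the host
 * receive buffer and applying checksum and VLAN metadata from the RNDIS
 * per-packet info.
 */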
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info,
				void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated.
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet,
				void **data,
				struct ndis_tcp_ip_checksum_info *csum_info,
				struct vmbus_channel *channel,
				u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	rcu_read_lock();
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb,
				    channel->offermsg.offer.sub_channel_index);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);
	rcu_read_unlock();

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

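/* ethtool set_channels: changing the channel count requires tearing down
 * the RNDIS device and re-adding it with the new number of queues; on
 * failure the original channel count is restored via the recover path.
 */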
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

 do_set:
	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = net_device_ctx->nvdev;

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

 out:
	netvsc_open(net);
	net_device_ctx->start_remove = false;
	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;

 recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}

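/* Changing the MTU also requires a full RNDIS device remove/add cycle, with
 * the current channel count preserved across the reset.
 */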
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	u32 num_chn;
	int ret = 0;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

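/* Aggregate the per-cpu transmit and receive counters into the
 * rtnl_link_stats64 reported to the stack.
 */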
static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
			rx_multicast = rx_stats->multicast + rx_stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes += tx_bytes;
		t->tx_packets += tx_packets;
		t->rx_bytes += rx_bytes;
		t->rx_packets += rx_packets;
		t->multicast += rx_multicast;
	}

	t->tx_dropped = net->stats.tx_dropped;
	t->tx_errors = net->stats.tx_dropped;

	t->rx_dropped = net->stats.rx_dropped;
	t->rx_errors = net->stats.rx_errors;
}

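/* Set a new MAC address: update the net_device first, then program the host
 * through RNDIS and roll back the software state if that fails.
 */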
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(netvsc_stats);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	const void *nds = &ndc->eth_stats;
	int i;

	for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = ndc->nvdev;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(nvdev, info);
	}
	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronous we don't have to
	 * trigger anything here.
	 */
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return 0;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = ndc->nvdev;
	struct rndis_device *rndis_dev = ndev->extension;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = ndc->nvdev;
	struct rndis_device *rndis_dev = ndev->extension;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!key || memcmp(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN) == 0)
		return 0; /* no change */

	return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
	.get_rxnfc	= netvsc_get_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

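/* Helpers for the VF (SR-IOV) event handlers below: locate the synthetic
 * netvsc interface that pairs with a VF, either by permanent MAC address or
 * by the vf_netdev pointer already recorded on the netvsc device.
 */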
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (net_device_ctx->nvdev == NULL)
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

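/* A VF with the same permanent MAC as a netvsc device is assumed to be the
 * accelerated path for that device; remember it so traffic can later be
 * switched over when the VF comes up.
 */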
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

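/* When the paired VF comes up, open the synthetic device's RNDIS filter and
 * ask the host to switch the data path to the VF; netvsc_vf_down() reverses
 * the switch.
 */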
static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(netvsc_dev);

	/*
	 * Notify the host to switch the data path.
	 */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}

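/* NETDEV_DOWN handler: the VF went down, so switch the data path back to
 * the synthetic device and close the RNDIS filter.
 */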
static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}

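/* NETDEV_UNREGISTER handler: break the pairing created by
 * netvsc_register_vf() and drop the VF and module references.
 */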
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}

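/* Probe a new VMBus network device: allocate the net_device and per-cpu
 * stats, set up the RNDIS filter, and register the resulting interface.
 */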
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);

	net_device_ctx->start_remove = false;

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}

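/* Tear down a VMBus network device: block further MTU/channel changes,
 * cancel outstanding work, unregister the interface and remove the RNDIS
 * filter before freeing the net_device.
 */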
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);
	net_device = ndev_ctx->nvdev;

	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
	 * removing the device.
	 */
	rtnl_lock();
	ndev_ctx->start_remove = true;
	rtnl_unlock();

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	hv_set_drvdata(dev, NULL);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The single hv_driver instance for all netvsc devices */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

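/* Module exit: drop the netdev notifier and unregister from VMBus. */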
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

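/* Module init: clamp ring_size to its minimum, register with VMBus and
 * hook the netdev notifier used for VF pairing. ring_size is a module
 * parameter, so it can be overridden at load time (for example, passing
 * "ring_size=256" on the modprobe command line).
 */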
static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);