/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

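/* Receive-filter changes are applied from a work item because
 * rndis_filter_set_packet_filter() sends a request to the host and may
 * sleep, which is not allowed in the atomic context in which
 * ndo_set_rx_mode callbacks run.
 */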
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read: poll the inbound and
	 * outbound ring of every channel, backing off exponentially from
	 * 10 ms up to 1 s, for at most retry_max polls.
	 */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

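/* Append a per-packet info element of ppi_size bytes to the RNDIS
 * message and initialize its header; the caller writes the payload at
 * ppi_offset past the returned pointer.
 */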
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function: for every set bit of the input, XOR in the
 * 32-bit window of the key that starts at that bit position.
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}

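/* Compute a Toeplitz hash over the dissected flow keys; TCP flows hash
 * 12 bytes (intended to cover addresses and ports), other IP flows hash
 * the first 8 bytes (addresses only).
 */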
static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
	      flow.basic.n_proto == htons(ETH_P_IPV6)))
		return false;

	if (flow.basic.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}

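/* Map the flow hash through the host-supplied VRSS send indirection
 * table to pick a transmit queue, and cache the hash on the skb.
 */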
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}
277
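
/* Completion callback invoked once the host has consumed the packet;
 * the skb stashed in send_completion_tid can now be freed.
 */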
void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;

	if (skb)
		dev_kfree_skb_any(skb);
}

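/* Describe a buffer that may span several pages as a series of
 * hv_page_buffer entries (pfn/offset/len); returns the number of
 * entries used.
 */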
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet)
{
	struct hv_page_buffer *pb = packet->page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

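/* Classify the packet for offload purposes: returns one of the
 * TRANSPORT_INFO_* values and, for IP packets, reports the transport
 * header offset needed by the checksum and LSO per-packet info.
 */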
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	u32 pkt_sz;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;

	ret = skb_cow_head(skb, pkt_sz);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the headroom for building up the packet */
	packet = (struct hv_netvsc_packet *)skb->head;

	packet->status = 0;
	packet->xmit_more = skb->xmit_more;

	packet->vlan_tci = skb->vlan_tci;
	packet->page_buf = page_buf;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet));

	memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

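	/* Large send: hand the host an LSOv2 per-packet info carrying the
	 * MSS, and seed the TCP checksum with the pseudo-header sum since
	 * the host performs the actual segmentation.
	 */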
587do_lso:
588 rndis_msg_size += NDIS_LSO_PPI_SIZE;
589 ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
590 TCP_LARGESEND_PKTINFO);
591
592 lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
593 ppi->ppi_offset);
594
595 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
596 if (net_trans_info & (INFO_IPV4 << 16)) {
597 lso_info->lso_v2_transmit.ip_version =
598 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
599 ip_hdr(skb)->tot_len = 0;
600 ip_hdr(skb)->check = 0;
601 tcp_hdr(skb)->check =
602 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
603 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
604 } else {
605 lso_info->lso_v2_transmit.ip_version =
606 NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
607 ipv6_hdr(skb)->payload_len = 0;
608 tcp_hdr(skb)->check =
609 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
610 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
611 }
612 lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
613 lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
KY Srinivasan08cd04b2014-03-08 19:23:17 -0800614
do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
639
Hank Janssen3e189512010-03-04 22:11:00 +0000640/*
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700641 * netvsc_linkstatus_callback - Link up/down notification
642 */
K. Y. Srinivasan90ef1172011-05-12 19:34:50 -0700643void netvsc_linkstatus_callback(struct hv_device *device_obj,
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700644 struct rndis_message *resp)
Hank Janssenfceaf242009-07-13 15:34:54 -0700645{
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700646 struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700647 struct net_device *net;
Haiyang Zhangc996edc2011-04-06 15:18:00 -0700648 struct net_device_context *ndev_ctx;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700649 struct netvsc_device *net_device;
Haiyang Zhang891de742014-02-12 16:54:27 -0800650 struct rndis_device *rdev;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700651
652 net_device = hv_get_drvdata(device_obj);
Haiyang Zhang891de742014-02-12 16:54:27 -0800653 rdev = net_device->extension;
654
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700655 switch (indicate->status) {
656 case RNDIS_STATUS_MEDIA_CONNECT:
657 rdev->link_state = false;
658 break;
659 case RNDIS_STATUS_MEDIA_DISCONNECT:
660 rdev->link_state = true;
661 break;
662 case RNDIS_STATUS_NETWORK_CHANGE:
663 rdev->link_change = true;
664 break;
665 default:
666 return;
667 }
Haiyang Zhang891de742014-02-12 16:54:27 -0800668
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700669 net = net_device->ndev;
Hank Janssenfceaf242009-07-13 15:34:54 -0700670
Haiyang Zhang891de742014-02-12 16:54:27 -0800671 if (!net || net->reg_state != NETREG_REGISTERED)
Hank Janssenfceaf242009-07-13 15:34:54 -0700672 return;
Hank Janssenfceaf242009-07-13 15:34:54 -0700673
Haiyang Zhang891de742014-02-12 16:54:27 -0800674 ndev_ctx = netdev_priv(net);
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700675 if (!rdev->link_state) {
Haiyang Zhangc4b6a2e2011-09-01 12:19:42 -0700676 schedule_delayed_work(&ndev_ctx->dwork, 0);
Haiyang Zhang122a5f62011-05-27 06:21:55 -0700677 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700678 } else {
Haiyang Zhang891de742014-02-12 16:54:27 -0800679 schedule_delayed_work(&ndev_ctx->dwork, 0);
Hank Janssenfceaf242009-07-13 15:34:54 -0700680 }
Hank Janssenfceaf242009-07-13 15:34:54 -0700681}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct net_device_context *net_device_ctx;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}
	net_device_ctx = netdev_priv(net);
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

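/* Changing the channel count means destroying the RNDIS device and
 * re-creating it with the new count; on failure the same sequence is
 * retried once with the original count.
 */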
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

 do_set:
	nvdev->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = hv_get_drvdata(dev);

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

 out:
	netvsc_open(net);

	return ret;

 recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

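/* An MTU change likewise requires destroying and re-creating the RNDIS
 * device, since the device's configuration is established when it is
 * added.
 */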
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	int ret = 0;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	nvdev->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);

	return ret;
}

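/* Aggregate the per-cpu transmit and receive counters; the u64_stats
 * retry loops yield a consistent 64-bit snapshot without blocking the
 * data path.
 */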
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes += tx_bytes;
		t->tx_packets += tx_packets;
		t->rx_bytes += rx_bytes;
		t->rx_packets += rx_packets;
	}

	t->tx_dropped = net->stats.tx_dropped;
	t->tx_errors = net->stats.tx_dropped;

	t->rx_dropped = net->stats.rx_dropped;
	t->rx_errors = net->stats.rx_errors;

	return t;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = netvsc_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_channels = netvsc_get_channels,
	.set_channels = netvsc_set_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open = netvsc_open,
	.ndo_stop = netvsc_close,
	.ndo_start_xmit = netvsc_start_xmit,
	.ndo_set_rx_mode = netvsc_set_multicast_list,
	.ndo_change_mtu = netvsc_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = netvsc_set_mac_addr,
	.ndo_select_queue = netvsc_select_queue,
	.ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = netvsc_poll_controller,
#endif
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netif_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

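/* Probe: allocate a multi-queue net_device and per-cpu stats, add the
 * RNDIS filter device (which negotiates with the host and returns the
 * MAC address), size the queues to the channel count, then register
 * the netdev.
 */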
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;
	u32 max_needed_headroom;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
			      RNDIS_AND_PPI_SIZE;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/*
	 * Request additional head room in the skb.
	 * We will use this space to build the rndis
	 * header and other state we need to maintain.
	 */
	net->needed_headroom = max_needed_headroom;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}

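/* Remove: cancel the link-change and rx-mode work, stop transmit,
 * unregister the netdev, then remove the RNDIS device.
 */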
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);