/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"


#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
				 NETIF_F_SG | \
				 NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_HW_CSUM)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

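/* Select the transmit queue for an skb: hash the flow and map the hash
 * through the host-supplied send indirection table, falling back to
 * queue 0 when the selected sub-channel is not yet open.
 */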
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}

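/* Fill hv_page_buffer entries (pfn/offset/len) describing a buffer that
 * may span several pages; returns the number of entries used.
 */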
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					offset_in_page(hdr),
					len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

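/* Classify the packet as IPv4/IPv6 TCP or UDP (or not IP at all) and
 * report the transport header offset for checksum/LSO setup.
 */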
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
		(eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

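/* Transmit path: build the RNDIS header and per-packet info (hash, VLAN,
 * checksum or LSO) in the skb headroom, describe the packet as a list of
 * page buffers and hand it to netvsc_send(). NETDEV_TX_BUSY is returned
 * only when the send ring is temporarily full (-EAGAIN).
 */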
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;
	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered around
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;


	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	isvlan = skb->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
					IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);

drop:
	if (ret == 0) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Handle link change statuses only */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	net = hv_get_drvdata(device_obj);

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}


static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info,
				void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet,
				void **data,
				struct ndis_tcp_ip_checksum_info *csum_info,
				struct vmbus_channel *channel,
				u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct sk_buff *skb;
	struct sk_buff *vf_skb;
	struct netvsc_stats *rx_stats;
	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
	u32 bytes_recvd = packet->total_data_buflen;
	int ret = 0;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	if (READ_ONCE(netvsc_dev->vf_inject)) {
		atomic_inc(&netvsc_dev->vf_use_cnt);
		if (!READ_ONCE(netvsc_dev->vf_inject)) {
			/*
			 * We raced; just move on.
			 */
			atomic_dec(&netvsc_dev->vf_use_cnt);
			goto vf_injection_done;
		}

		/*
		 * Inject this packet into the VF interface.
		 * On Hyper-V, multicast and broadcast packets
		 * are only delivered on the synthetic interface
		 * (after subjecting these to policy filters on
		 * the host). Deliver these via the VF interface
		 * in the guest.
		 */
		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
					       csum_info, *data, vlan_tci);
		if (vf_skb != NULL) {
			++netvsc_dev->vf_netdev->stats.rx_packets;
			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
			netif_receive_skb(vf_skb);
		} else {
			++net->stats.rx_dropped;
			ret = NVSP_STAT_FAIL;
		}
		atomic_dec(&netvsc_dev->vf_use_cnt);
		return ret;
	}

vf_injection_done:
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}
	skb_record_rx_queue(skb, channel->
			    offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

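/* Change the number of channels: close the device, tear down and re-add
 * the RNDIS device with the requested sub-channel count, and fall back
 * to the original count if anything in that sequence fails.
 */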
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

 do_set:
	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = net_device_ctx->nvdev;

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

 out:
	netvsc_open(net);
	net_device_ctx->start_remove = false;
	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;

 recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}

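/* MTU changes require tearing down and re-creating the RNDIS device with
 * the same channel count, so the interface is closed around the update.
 */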
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	u32 num_chn;
	int ret = 0;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_dropped;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronous we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

static void netvsc_notify_peers(struct work_struct *wrk)
{
	struct garp_wrk *gwrk;

	gwrk = container_of(wrk, struct garp_wrk, dwrk);

	netdev_notify_peers(gwrk->netdev);

	atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
}

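/* Find the netvsc net_device whose MAC address matches the given one;
 * used to pair a VF interface with its synthetic counterpart.
 */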
static struct net_device *get_netvsc_net_device(char *mac)
{
	struct net_device *dev, *found = NULL;
	int rtnl_locked;

	rtnl_locked = rtnl_trylock();

	for_each_netdev(&init_net, dev) {
		if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
			if (dev->netdev_ops != &device_ops)
				continue;
			found = dev;
			break;
		}
	}
	if (rtnl_locked)
		rtnl_unlock();

	return found;
}

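/* Netdev notifier handlers for SR-IOV virtual functions: when a VF with a
 * matching MAC registers, comes up, goes down or unregisters, switch the
 * data path between the synthetic and VF interfaces accordingly.
 */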
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == NULL || eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (netvsc_dev == NULL)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);
	netvsc_dev->vf_netdev = vf_netdev;
	return NOTIFY_OK;
}


1220{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001221 struct net_device *ndev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001222 struct netvsc_device *netvsc_dev;
1223 const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
1224 struct net_device_context *net_device_ctx;
1225
1226 if (eth_ops == &ethtool_ops)
1227 return NOTIFY_DONE;
1228
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001229 ndev = get_netvsc_net_device(vf_netdev->dev_addr);
1230 if (!ndev)
1231 return NOTIFY_DONE;
1232
1233 net_device_ctx = netdev_priv(ndev);
1234 netvsc_dev = net_device_ctx->nvdev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001235
1236 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
1237 return NOTIFY_DONE;
1238
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001239 netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001240 netvsc_dev->vf_inject = true;
1241
1242 /*
1243 * Open the device before switching data path.
1244 */
Vitaly Kuznetsov2f5fa6c2016-06-03 17:51:00 +02001245 rndis_filter_open(netvsc_dev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001246
1247 /*
1248 * notify the host to switch the data path.
1249 */
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001250 netvsc_switch_datapath(ndev, true);
1251 netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001252
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001253 netif_carrier_off(ndev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001254
1255 /*
1256 * Now notify peers. We are scheduling work to
1257 * notify peers; take a reference to prevent
1258 * the VF interface from vanishing.
1259 */
1260 atomic_inc(&netvsc_dev->vf_use_cnt);
1261 net_device_ctx->gwrk.netdev = vf_netdev;
1262 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1263 schedule_work(&net_device_ctx->gwrk.dwrk);
1264
1265 return NOTIFY_OK;
1266}
1267
1268
static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_dev->vf_inject = false;
	/*
	 * Wait for currently active users to
	 * drain out.
	 */

	while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
		udelay(50);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);
	/*
	 * Notify peers.
	 */
	atomic_inc(&netvsc_dev->vf_use_cnt);
	net_device_ctx->gwrk.netdev = ndev;
	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
	schedule_work(&net_device_ctx->gwrk.dwrk);

	return NOTIFY_OK;
}


static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
	struct net_device_context *net_device_ctx;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (netvsc_dev == NULL)
		return NOTIFY_DONE;
	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netvsc_dev->vf_netdev = NULL;
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}

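/* Probe: allocate the net_device, set up per-cpu stats and work items,
 * add the RNDIS device and register the interface with the network core.
 */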
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);

	net_device_ctx->start_remove = false;

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);
	INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;

	net->hw_features = NETVSC_HW_FEATURES;
	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	netvsc_init_settings(net);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}

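/*
 * Tear down a VMBus network device: flag the removal under rtnl_lock() so
 * netvsc_change_mtu()/netvsc_set_channels() cannot race with it, cancel
 * pending work, stop transmit, unregister the netdev and remove the
 * underlying RNDIS device.
 */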
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);
	net_device = ndev_ctx->nvdev;

	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
	 * removing the device.
	 */
	rtnl_lock();
	ndev_ctx->start_remove = true;
	rtnl_unlock();

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	hv_set_drvdata(dev, NULL);

	netvsc_free_netdev(net);
	return 0;
}

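/* VMBus device IDs served by this driver: the synthetic network GUID. */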
static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only netvsc driver instance */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Avoid a VLAN device with the same MAC registering as a VF */
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

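/*
 * Netdev notifier used to spot VF interfaces coming and going; the actual
 * filtering and handling is done in netvsc_netdev_event() above.
 */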
static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

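/* Module exit: stop watching netdev events, then unregister from VMBus. */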
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

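/*
 * Module init: raise ring_size to the allowed minimum if needed, register
 * the VMBus driver, then install the netdevice notifier used for VF
 * management.
 */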
static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);