/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"


#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
				 NETIF_F_SG | \
				 NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_HW_CSUM)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

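/* Apply the current promiscuous/multicast settings to the RNDIS filter.
 * Done from a work item because the RNDIS request may sleep, which is
 * not allowed in the atomic ndo_set_rx_mode context that requests it.
 */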
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

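/* Select the transmit queue for an skb: hash the flow and map the hash
 * through the host-provided send indirection table, falling back to
 * queue 0 if the chosen channel has not been opened.
 */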
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}

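/* Describe a buffer as one or more hv_page_buffer entries, splitting it
 * at page boundaries; returns the number of entries filled in.
 */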
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

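/* Classify the packet as IPv4/IPv6 TCP or UDP and report the transport
 * header offset, for use when setting up checksum and LSO offload.
 */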
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

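/* Transmit path: build the RNDIS message and per-packet info (hash,
 * VLAN, checksum or LSO offload) in the skb headroom, describe the
 * packet as a page buffer list and hand it to netvsc_send().
 */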
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;
	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	isvlan = skb->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);

drop:
	if (ret == 0) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Handle link change statuses only */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	net_device = hv_get_drvdata(device_obj);
	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 void **data,
			 struct ndis_tcp_ip_checksum_info *csum_info,
			 struct vmbus_channel *channel,
			 u16 vlan_tci)
{
	struct net_device *net;
	struct net_device_context *net_device_ctx;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		return NVSP_STAT_FAIL;
	}
	net_device_ctx = netdev_priv(net);
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), *data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	skb_record_rx_queue(skb, channel->
			    offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

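/* Change the number of combined channels: close the device, remove and
 * re-add the RNDIS device with the requested channel count, and retry
 * with the original count if that fails.
 */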
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

do_set:
	nvdev->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = hv_get_drvdata(dev);

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

out:
	netvsc_open(net);

	return ret;

recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

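/* An MTU change requires closing the device and re-creating the RNDIS
 * device with the new size before reopening it.
 */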
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	int ret = 0;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	nvdev->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);

	return ret;
}

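/* Sum the per-cpu tx/rx counters into the rtnl_link_stats64 snapshot,
 * using the u64_stats seqcount to obtain consistent readings.
 */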
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes += tx_bytes;
		t->tx_packets += tx_packets;
		t->rx_bytes += rx_bytes;
		t->rx_packets += rx_packets;
	}

	t->tx_dropped = net->stats.tx_dropped;
	t->tx_errors = net->stats.tx_dropped;

	t->rx_dropped = net->stats.rx_dropped;
	t->rx_errors = net->stats.rx_errors;

	return t;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronous we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		return;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		return;

	rtnl_lock();

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add_tail(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

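/* Probe a new VMBus network device: allocate the net_device and per-cpu
 * stats, set up the work items, create the RNDIS device and register
 * the interface with the network stack.
 */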
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;

	net->hw_features = NETVSC_HW_FEATURES;
	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);