/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

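/* Deferred work: push the current rx mode (promiscuous vs. normal
 * broadcast/multicast/directed reception) down to the RNDIS packet filter.
 */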
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

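/* Append a per-packet info (PPI) element of the given type to the RNDIS
 * message header and return a pointer to it so the caller can fill it in.
 */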
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute
 * hash for non-TCP traffic with only IP numbers.
 */
static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
{
	struct flow_keys flow;
	u32 hash;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	if (flow.basic.ip_proto == IPPROTO_TCP) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
				   (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_tx_queues = ndev->real_num_tx_queues;
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	while (unlikely(q_idx >= num_tx_queues))
		q_idx -= num_tx_queues;

	return q_idx;
}

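/* Describe one virtually-contiguous buffer as a series of page buffer
 * entries (pfn/offset/len), splitting it at page boundaries.
 * Returns the number of entries written.
 */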
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

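/* Classify the packet's transport protocol so the caller can check it
 * against the host-advertised checksum offload capabilities
 * (tx_checksum_mask).
 */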
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate + indicate->
				 status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, buflen), data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	rcu_read_lock();
	net_device = rcu_dereference(net_device_ctx->nvdev);
	if (unlikely(!net_device))
		goto drop;

	nvchan = &net_device->chan_table[q_idx];
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
drop:
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	rcu_read_unlock();

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

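/* Re-create the RNDIS device with the requested channel count and resize
 * the real tx/rx queue counts to match.
 */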
static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
			     u32 num_chn)
{
	struct netvsc_device_info device_info;
	int ret;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = num_chn;
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = num_chn;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret)
		return ret;

	ret = netif_set_real_num_tx_queues(net, num_chn);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(net, num_chn);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int count = channels->combined_count;
	bool was_running;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (count > net->num_tx_queues || count > net->num_rx_queues)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	was_running = netif_running(net);
	if (was_running) {
		ret = netvsc_close(net);
		if (ret)
			return ret;
	}

	rndis_filter_device_remove(dev, nvdev);

	ret = netvsc_set_queues(net, dev, count);
	if (ret == 0)
		nvdev->num_chn = count;
	else
		netvsc_set_queues(net, dev, nvdev->num_chn);

	if (was_running)
		ret = netvsc_open(net);

	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	bool was_running;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	was_running = netif_running(ndev);
	if (was_running) {
		ret = netvsc_close(ndev);
		if (ret)
			return ret;
	}

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = nvdev->num_chn;

	rndis_filter_device_remove(hdev, nvdev);

	/* 'nvdev' has been freed in rndis_filter_device_remove() ->
	 * netvsc_device_remove () -> free_netvsc_device().
	 * We mustn't access it before it's re-created in
	 * rndis_filter_device_add() -> netvsc_device_add().
	 */

	ndev->mtu = mtu;

	rndis_filter_device_add(hdev, &device_info);

	if (was_running)
		ret = netvsc_open(ndev);

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	if (!nvdev)
		return;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_errors;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(p + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);

		p += i * ETH_GSTRING_LEN;
		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(nvdev, info);
	}
	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
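/* netpoll support: kick NAPI on every channel so pending ring buffer work
 * gets processed.
 */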
static void netvsc_poll_controller(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev;
	int i;

	rcu_read_lock();
	ndev = rcu_dereference(ndc->nvdev);
	if (ndev) {
		for (i = 0; i < ndev->num_chn; i++) {
			struct netvsc_channel *nvchan = &ndev->chan_table[i];

			napi_schedule(&nvchan->napi);
		}
	}
	rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->ind_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= dev->num_rx_queues)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->ind_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}
1249
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001250static const struct ethtool_ops ethtool_ops = {
1251 .get_drvinfo = netvsc_get_drvinfo,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001252 .get_link = ethtool_op_get_link,
Stephen Hemminger4323b472016-08-23 12:17:57 -07001253 .get_ethtool_stats = netvsc_get_ethtool_stats,
1254 .get_sset_count = netvsc_get_sset_count,
1255 .get_strings = netvsc_get_strings,
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -08001256 .get_channels = netvsc_get_channels,
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -07001257 .set_channels = netvsc_set_channels,
sixiao@microsoft.com76d13b52016-02-17 16:43:59 -08001258 .get_ts_info = ethtool_op_get_ts_info,
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001259 .get_rxnfc = netvsc_get_rxnfc,
stephen hemminger962f3fe2017-01-24 13:06:02 -08001260 .get_rxfh_key_size = netvsc_get_rxfh_key_size,
1261 .get_rxfh_indir_size = netvsc_rss_indir_size,
1262 .get_rxfh = netvsc_get_rxfh,
1263 .set_rxfh = netvsc_set_rxfh,
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001264 .get_link_ksettings = netvsc_get_link_ksettings,
1265 .set_link_ksettings = netvsc_set_link_ksettings,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001266};
1267
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001268static const struct net_device_ops device_ops = {
1269 .ndo_open = netvsc_open,
1270 .ndo_stop = netvsc_close,
1271 .ndo_start_xmit = netvsc_start_xmit,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00001272 .ndo_set_rx_mode = netvsc_set_multicast_list,
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001273 .ndo_change_mtu = netvsc_change_mtu,
Haiyang Zhangb681b582010-08-03 19:15:31 +00001274 .ndo_validate_addr = eth_validate_addr,
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001275 .ndo_set_mac_address = netvsc_set_mac_addr,
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001276 .ndo_select_queue = netvsc_select_queue,
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001277 .ndo_get_stats64 = netvsc_get_stats64,
Richard Weinberger316158f2014-07-09 16:23:59 +02001278#ifdef CONFIG_NET_POLL_CONTROLLER
1279 .ndo_poll_controller = netvsc_poll_controller,
1280#endif
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001281};
1282
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001283/*
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001284 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE, emulate a link
 1285 * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when the carrier is
 1286 * already present, send a GARP packet to network peers with netdev_notify_peers().
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001287 */
Haiyang Zhang891de742014-02-12 16:54:27 -08001288static void netvsc_link_change(struct work_struct *w)
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001289{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001290 struct net_device_context *ndev_ctx =
1291 container_of(w, struct net_device_context, dwork.work);
1292 struct hv_device *device_obj = ndev_ctx->device_ctx;
1293 struct net_device *net = hv_get_drvdata(device_obj);
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001294 struct netvsc_device *net_device;
Haiyang Zhang891de742014-02-12 16:54:27 -08001295 struct rndis_device *rdev;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001296 struct netvsc_reconfig *event = NULL;
1297 bool notify = false, reschedule = false;
1298 unsigned long flags, next_reconfig, delay;
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001299
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001300 rtnl_lock();
stephen hemmingera0be4502017-03-22 14:51:01 -07001301 net_device = rtnl_dereference(ndev_ctx->nvdev);
1302 if (!net_device)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001303 goto out_unlock;
1304
Haiyang Zhang891de742014-02-12 16:54:27 -08001305 rdev = net_device->extension;
Haiyang Zhang891de742014-02-12 16:54:27 -08001306
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001307 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1308 if (time_is_after_jiffies(next_reconfig)) {
 1309 	/* link_watch only sends one notification with the current state
 1310 	 * per second, so avoid reconfiguring more frequently than that.
 1311 	 * Handle jiffies wrap-around.
1312 */
1313 delay = next_reconfig - jiffies;
1314 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1315 schedule_delayed_work(&ndev_ctx->dwork, delay);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001316 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001317 }
1318 ndev_ctx->last_reconfig = jiffies;
1319
1320 spin_lock_irqsave(&ndev_ctx->lock, flags);
1321 if (!list_empty(&ndev_ctx->reconfig_events)) {
1322 event = list_first_entry(&ndev_ctx->reconfig_events,
1323 struct netvsc_reconfig, list);
1324 list_del(&event->list);
1325 reschedule = !list_empty(&ndev_ctx->reconfig_events);
1326 }
1327 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1328
1329 if (!event)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001330 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001331
1332 switch (event->event) {
1333 /* Only the following events are possible due to the check in
1334 * netvsc_linkstatus_callback()
1335 */
1336 case RNDIS_STATUS_MEDIA_CONNECT:
1337 if (rdev->link_state) {
1338 rdev->link_state = false;
1339 netif_carrier_on(net);
1340 netif_tx_wake_all_queues(net);
1341 } else {
1342 notify = true;
Haiyang Zhang3a494e72014-06-19 18:34:36 -07001343 }
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001344 kfree(event);
1345 break;
1346 case RNDIS_STATUS_MEDIA_DISCONNECT:
1347 if (!rdev->link_state) {
1348 rdev->link_state = true;
1349 netif_carrier_off(net);
1350 netif_tx_stop_all_queues(net);
1351 }
1352 kfree(event);
1353 break;
1354 case RNDIS_STATUS_NETWORK_CHANGE:
1355 /* Only makes sense if carrier is present */
1356 if (!rdev->link_state) {
1357 rdev->link_state = true;
1358 netif_carrier_off(net);
1359 netif_tx_stop_all_queues(net);
1360 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1361 spin_lock_irqsave(&ndev_ctx->lock, flags);
Haiyang Zhang15cfd402016-04-21 16:13:01 -07001362 list_add(&event->list, &ndev_ctx->reconfig_events);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001363 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1364 reschedule = true;
1365 }
1366 break;
Haiyang Zhang891de742014-02-12 16:54:27 -08001367 }
1368
1369 rtnl_unlock();
1370
1371 if (notify)
1372 netdev_notify_peers(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001373
 1374 	/* link_watch only sends one notification with the current state per
 1375 	 * second, so handle the next reconfig event in 2 seconds.
1376 */
1377 if (reschedule)
1378 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001379
1380 return;
1381
1382out_unlock:
1383 rtnl_unlock();
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001384}
1385
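/* Find the netvsc net_device whose permanent MAC address matches @mac.
 * The caller must hold the rtnl lock.
 */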
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001386static struct net_device *get_netvsc_bymac(const u8 *mac)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001387{
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001388 struct net_device *dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001389
Stephen Hemminger8737caa2016-08-23 12:17:44 -07001390 ASSERT_RTNL();
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001391
1392 for_each_netdev(&init_net, dev) {
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001393 if (dev->netdev_ops != &device_ops)
1394 continue; /* not a netvsc device */
1395
1396 if (ether_addr_equal(mac, dev->perm_addr))
1397 return dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001398 }
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001399
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001400 return NULL;
1401}
1402
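/* Find the netvsc net_device that has @vf_netdev recorded as its paired VF.
 * The caller must hold the rtnl lock.
 */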
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001403static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001404{
1405 struct net_device *dev;
1406
1407 ASSERT_RTNL();
1408
1409 for_each_netdev(&init_net, dev) {
1410 struct net_device_context *net_device_ctx;
1411
1412 if (dev->netdev_ops != &device_ops)
1413 continue; /* not a netvsc device */
1414
1415 net_device_ctx = netdev_priv(dev);
1416 if (net_device_ctx->nvdev == NULL)
 1417 		if (!rtnl_dereference(net_device_ctx->nvdev))
1418
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001419 if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001420 return dev; /* a match */
1421 }
1422
1423 return NULL;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001424}
1425
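/* Called for NETDEV_REGISTER of a candidate VF interface: match it to the
 * synthetic interface with the same permanent MAC address and record the
 * pairing.
 */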
1426static int netvsc_register_vf(struct net_device *vf_netdev)
1427{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001428 struct net_device *ndev;
1429 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001430 struct netvsc_device *netvsc_dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001431
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001432 if (vf_netdev->addr_len != ETH_ALEN)
1433 return NOTIFY_DONE;
1434
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001435 /*
1436 * We will use the MAC address to locate the synthetic interface to
1437 * associate with the VF interface. If we don't find a matching
1438 * synthetic interface, move on.
1439 */
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001440 ndev = get_netvsc_bymac(vf_netdev->perm_addr);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001441 if (!ndev)
1442 return NOTIFY_DONE;
1443
1444 net_device_ctx = netdev_priv(ndev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001445 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001446 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001447 return NOTIFY_DONE;
1448
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001449 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001450 /*
1451 * Take a reference on the module.
1452 */
1453 try_module_get(THIS_MODULE);
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001454
1455 dev_hold(vf_netdev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001456 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001457 return NOTIFY_OK;
1458}
1459
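/* Called for NETDEV_UP of the VF: open the synthetic device and ask the
 * host to switch the data path over to the VF.
 */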
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001460static int netvsc_vf_up(struct net_device *vf_netdev)
1461{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001462 struct net_device *ndev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001463 struct netvsc_device *netvsc_dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001464 struct net_device_context *net_device_ctx;
1465
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001466 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001467 if (!ndev)
1468 return NOTIFY_DONE;
1469
1470 net_device_ctx = netdev_priv(ndev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001471 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001472
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001473 netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001474
1475 /*
1476 * Open the device before switching data path.
1477 */
Vitaly Kuznetsov2f5fa6c2016-06-03 17:51:00 +02001478 rndis_filter_open(netvsc_dev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001479
1480 /*
 1481 	 * Notify the host to switch the data path.
1482 */
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001483 netvsc_switch_datapath(ndev, true);
1484 netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001485
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001486 netif_carrier_off(ndev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001487
Vitaly Kuznetsovd0722182016-08-15 17:48:40 +02001488 /* Now notify peers through VF device. */
1489 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001490
1491 return NOTIFY_OK;
1492}
1493
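/* Called for NETDEV_DOWN of the VF: switch the data path back to the
 * synthetic device and close the RNDIS filter.
 */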
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001494static int netvsc_vf_down(struct net_device *vf_netdev)
1495{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001496 struct net_device *ndev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001497 struct netvsc_device *netvsc_dev;
1498 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001499
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001500 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001501 if (!ndev)
1502 return NOTIFY_DONE;
1503
1504 net_device_ctx = netdev_priv(ndev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001505 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001506
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001507 netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001508 netvsc_switch_datapath(ndev, false);
1509 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
Vitaly Kuznetsov2f5fa6c2016-06-03 17:51:00 +02001510 rndis_filter_close(netvsc_dev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001511 netif_carrier_on(ndev);
Vitaly Kuznetsovd0722182016-08-15 17:48:40 +02001512
1513 /* Now notify peers through netvsc device. */
1514 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001515
1516 return NOTIFY_OK;
1517}
1518
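/* Called for NETDEV_UNREGISTER of the VF: clear the pairing and drop the
 * references taken in netvsc_register_vf().
 */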
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001519static int netvsc_unregister_vf(struct net_device *vf_netdev)
1520{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001521 struct net_device *ndev;
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001522 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001523
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001524 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001525 if (!ndev)
1526 return NOTIFY_DONE;
1527
1528 net_device_ctx = netdev_priv(ndev);
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001529
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001530 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001531
1532 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001533 dev_put(vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001534 module_put(THIS_MODULE);
1535 return NOTIFY_OK;
1536}
1537
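/* Probe a new VMBus network device: allocate the net_device, bring up the
 * RNDIS filter, and register the interface with the network stack.
 */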
K. Y. Srinivasan84946892011-09-13 10:59:38 -07001538static int netvsc_probe(struct hv_device *dev,
1539 const struct hv_vmbus_device_id *dev_id)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001540{
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001541 struct net_device *net = NULL;
1542 struct net_device_context *net_device_ctx;
1543 struct netvsc_device_info device_info;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001544 struct netvsc_device *nvdev;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001545 int ret;
1546
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001547 net = alloc_etherdev_mq(sizeof(struct net_device_context),
stephen hemminger2b018882017-01-24 13:06:03 -08001548 VRSS_CHANNEL_MAX);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001549 if (!net)
K. Y. Srinivasan51a805d2011-08-25 09:49:11 -07001550 return -ENOMEM;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001551
Haiyang Zhang1b07da52014-03-04 14:11:06 -08001552 netif_carrier_off(net);
1553
Haiyang Zhangb37879e2016-08-04 10:42:14 -07001554 netvsc_init_settings(net);
1555
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001556 net_device_ctx = netdev_priv(net);
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07001557 net_device_ctx->device_ctx = dev;
Simon Xiao3f300ff2015-04-28 01:05:17 -07001558 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
1559 if (netif_msg_probe(net_device_ctx))
1560 netdev_dbg(net, "netvsc msg_enable: %d\n",
1561 net_device_ctx->msg_enable);
1562
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001563 hv_set_drvdata(dev, net);
Vitaly Kuznetsovf580aec2016-05-13 13:55:20 +02001564
Haiyang Zhang891de742014-02-12 16:54:27 -08001565 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
Wenqi Ma792df872012-04-19 00:39:37 +00001566 INIT_WORK(&net_device_ctx->work, do_set_multicast);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001567
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001568 spin_lock_init(&net_device_ctx->lock);
1569 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
1570
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001571 net->netdev_ops = &device_ops;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00001572 net->ethtool_ops = &ethtool_ops;
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07001573 SET_NETDEV_DEV(net, &dev->device);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001574
Vitaly Kuznetsov14a03cf2016-02-05 17:29:08 +01001575	/* We always need headroom for the RNDIS header */
1576 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1577
Haiyang Zhang692e0842011-09-01 12:19:43 -07001578 /* Notify the netvsc driver of the new device */
Andrew Schwartzmeyer8ebdcc52015-08-11 17:14:31 -07001579 memset(&device_info, 0, sizeof(device_info));
Haiyang Zhang692e0842011-09-01 12:19:43 -07001580 device_info.ring_size = ring_size;
stephen hemminger3071ada2017-03-22 14:50:59 -07001581 device_info.num_chn = VRSS_CHANNEL_DEFAULT;
Haiyang Zhang692e0842011-09-01 12:19:43 -07001582 ret = rndis_filter_device_add(dev, &device_info);
1583 if (ret != 0) {
1584 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001585 free_netdev(net);
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001586 hv_set_drvdata(dev, NULL);
Haiyang Zhang692e0842011-09-01 12:19:43 -07001587 return ret;
1588 }
1589 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
1590
stephen hemminger23312a32017-01-24 13:05:59 -08001591 /* hw_features computed in rndis_filter_device_add */
1592 net->features = net->hw_features |
1593 NETIF_F_HIGHDMA | NETIF_F_SG |
1594 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1595 net->vlan_features = net->features;
1596
stephen hemminger545a8e72017-03-22 14:51:00 -07001597 /* RCU not necessary here, device not registered */
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02001598 nvdev = net_device_ctx->nvdev;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001599 netif_set_real_num_tx_queues(net, nvdev->num_chn);
1600 netif_set_real_num_rx_queues(net, nvdev->num_chn);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001601
Jarod Wilsond0c2c992016-10-20 13:55:21 -04001602 /* MTU range: 68 - 1500 or 65521 */
1603 net->min_mtu = NETVSC_MTU_MIN;
1604 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
1605 net->max_mtu = NETVSC_MTU - ETH_HLEN;
1606 else
1607 net->max_mtu = ETH_DATA_LEN;
1608
Haiyang Zhanga68f9612013-12-20 16:52:31 -08001609 ret = register_netdev(net);
1610 if (ret != 0) {
1611 pr_err("Unable to register netdev.\n");
stephen hemminger2289f0a2017-01-24 13:06:10 -08001612 rndis_filter_device_remove(dev, nvdev);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001613 free_netdev(net);
Haiyang Zhanga68f9612013-12-20 16:52:31 -08001614 }
1615
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001616 return ret;
1617}
1618
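/* Tear down a VMBus network device: detach the interface, remove the
 * RNDIS/netvsc device under the rtnl lock, then unregister and free the
 * net_device.
 */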
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07001619static int netvsc_remove(struct hv_device *dev)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001620{
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001621 struct net_device *net;
Haiyang Zhang122a5f62011-05-27 06:21:55 -07001622 struct net_device_context *ndev_ctx;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001623
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02001624 net = hv_get_drvdata(dev);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001625
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001626 if (net == NULL) {
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07001627 dev_err(&dev->device, "No net device to remove\n");
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001628 return 0;
1629 }
1630
Haiyang Zhang122a5f62011-05-27 06:21:55 -07001631 ndev_ctx = netdev_priv(net);
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02001632
stephen hemmingera0be4502017-03-22 14:51:01 -07001633 netif_device_detach(net);
Vitaly Kuznetsovf580aec2016-05-13 13:55:20 +02001634
Haiyang Zhang122a5f62011-05-27 06:21:55 -07001635 cancel_delayed_work_sync(&ndev_ctx->dwork);
Wenqi Ma792df872012-04-19 00:39:37 +00001636 cancel_work_sync(&ndev_ctx->work);
Haiyang Zhang122a5f62011-05-27 06:21:55 -07001637
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001638 /*
 1639 	 * Call into the VSC driver to let it know that the device is being
stephen hemmingera0be4502017-03-22 14:51:01 -07001640	 * removed. Holding the rtnl lock here also blocks MTU and channel changes.
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001641 */
stephen hemmingera0be4502017-03-22 14:51:01 -07001642 rtnl_lock();
stephen hemminger2289f0a2017-01-24 13:06:10 -08001643 rndis_filter_device_remove(dev, ndev_ctx->nvdev);
stephen hemmingera0be4502017-03-22 14:51:01 -07001644 rtnl_unlock();
1645
1646 unregister_netdev(net);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001647
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02001648 hv_set_drvdata(dev, NULL);
1649
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001650 free_netdev(net);
Haiyang Zhangdf06bcf2011-05-23 09:03:47 -07001651 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001652}
1653
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07001654static const struct hv_vmbus_device_id id_table[] = {
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07001655 /* Network guid */
K. Y. Srinivasan8f505942013-01-23 17:42:42 -08001656 { HV_NIC_GUID, },
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07001657 { },
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07001658};
1659
1660MODULE_DEVICE_TABLE(vmbus, id_table);
1661
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07001662/* The one and only netvsc driver instance */
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07001663static struct hv_driver netvsc_drv = {
Haiyang Zhangd31b20f2012-03-07 10:02:00 +00001664 .name = KBUILD_MODNAME,
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07001665 .id_table = id_table,
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07001666 .probe = netvsc_probe,
1667 .remove = netvsc_remove,
K. Y. Srinivasand4890972011-05-10 07:55:17 -07001668};
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07001669
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001670/*
1671 * On Hyper-V, every VF interface is matched with a corresponding
1672 * synthetic interface. The synthetic interface is presented first
1673 * to the guest. When the corresponding VF instance is registered,
1674 * we will take care of switching the data path.
1675 */
1676static int netvsc_netdev_event(struct notifier_block *this,
1677 unsigned long event, void *ptr)
1678{
1679 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
1680
Stephen Hemmingeree837a12016-09-22 16:56:31 -07001681 /* Skip our own events */
1682 if (event_dev->netdev_ops == &device_ops)
1683 return NOTIFY_DONE;
1684
1685 /* Avoid non-Ethernet type devices */
1686 if (event_dev->type != ARPHRD_ETHER)
1687 return NOTIFY_DONE;
1688
Vitaly Kuznetsov0dbff142016-08-15 17:48:43 +02001689	/* Avoid a VLAN device with the same MAC registering as a VF */
Parav Panditd0d7b102017-02-04 11:00:49 -06001690 if (is_vlan_dev(event_dev))
Vitaly Kuznetsov0dbff142016-08-15 17:48:43 +02001691 return NOTIFY_DONE;
1692
 1693 	/* Avoid a bonding master device with the same MAC registering as a VF */
Stephen Hemmingeree837a12016-09-22 16:56:31 -07001694 if ((event_dev->priv_flags & IFF_BONDING) &&
1695 (event_dev->flags & IFF_MASTER))
Haiyang Zhangcb2911f2016-06-02 12:02:04 -07001696 return NOTIFY_DONE;
1697
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001698 switch (event) {
1699 case NETDEV_REGISTER:
1700 return netvsc_register_vf(event_dev);
1701 case NETDEV_UNREGISTER:
1702 return netvsc_unregister_vf(event_dev);
1703 case NETDEV_UP:
1704 return netvsc_vf_up(event_dev);
1705 case NETDEV_DOWN:
1706 return netvsc_vf_down(event_dev);
1707 default:
1708 return NOTIFY_DONE;
1709 }
1710}
1711
1712static struct notifier_block netvsc_netdev_notifier = {
1713 .notifier_call = netvsc_netdev_event,
1714};
1715
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07001716static void __exit netvsc_drv_exit(void)
Hank Janssenfceaf242009-07-13 15:34:54 -07001717{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001718 unregister_netdevice_notifier(&netvsc_netdev_notifier);
Greg Kroah-Hartman768fa212011-08-25 15:07:32 -07001719 vmbus_driver_unregister(&netvsc_drv);
Hank Janssenfceaf242009-07-13 15:34:54 -07001720}
1721
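/* Module init: clamp ring_size to the allowed minimum, register the VMBus
 * driver, and install the netdevice notifier used for VF pairing.
 */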
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07001722static int __init netvsc_drv_init(void)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001723{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001724 int ret;
1725
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00001726 if (ring_size < RING_SIZE_MIN) {
1727 ring_size = RING_SIZE_MIN;
1728 pr_info("Increased ring_size to %d (min allowed)\n",
1729 ring_size);
1730 }
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001731 ret = vmbus_driver_register(&netvsc_drv);
1732
1733 if (ret)
1734 return ret;
1735
1736 register_netdevice_notifier(&netvsc_netdev_notifier);
1737 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001738}
1739
Hank Janssen26c14cc2010-02-11 23:02:42 +00001740MODULE_LICENSE("GPL");
Stephen Hemminger7880fc52010-05-04 09:58:52 -07001741MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
Hank Janssenfceaf242009-07-13 15:34:54 -07001742
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07001743module_init(netvsc_drv_init);
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07001744module_exit(netvsc_drv_exit);