/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

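/* Deferred worker that applies the receive packet filter. The RNDIS
 * set-filter request can block waiting for the host, so ndo_set_rx_mode only
 * schedules this work instead of issuing the request directly.
 */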
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

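/* ndo_open: open the RNDIS filter device, start all transmit queues and
 * turn the carrier on if the host has already reported link-up.
 */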
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_start_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

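/* Reserve and initialize a per-packet-info (PPI) element of the given type
 * in the RNDIS packet message; the caller writes the type-specific payload
 * at ppi_offset past the returned header.
 */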
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}

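/* Hash the dissected flow keys with the Toeplitz function: addresses plus
 * ports (12 bytes) for TCP, addresses only (8 bytes) otherwise. Returns
 * false, and computes no hash, for non-IP traffic.
 */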
static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect(skb, &flow) ||
	    !(flow.n_proto == htons(ETH_P_IP) ||
	      flow.n_proto == htons(ETH_P_IPV6)))
		return false;

	if (flow.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}

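/* ndo_select_queue: map the flow hash through the host-supplied indirection
 * table (send_table) to choose a VRSS transmit queue; queue 0 is used when
 * the device is gone or only one queue exists.
 */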
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}

void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;

	if (!packet->part_of_skb)
		kfree(packet);

	if (skb)
		dev_kfree_skb_any(skb);
}

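/* Describe a virtually contiguous buffer as pfn/offset/length entries in the
 * page buffer array; returns the number of entries used.
 */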
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

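/* Build the page buffer array for a send: the RNDIS header first, then the
 * skb linear data, then each fragment. rmsg_size and rmsg_pgcnt record the
 * size and page count of the RNDIS portion for the lower-level send path.
 * Returns the total number of page buffer entries used.
 */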
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet)
{
	struct hv_page_buffer *pb = packet->page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

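/* Classify the frame as IPv4/IPv6 TCP or UDP (or not IP at all) and report
 * the transport header offset needed by the checksum and LSO PPIs.
 */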
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

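/* ndo_start_xmit: wrap the skb in an RNDIS packet message and hand it to
 * netvsc_send(). The hv_netvsc_packet state and RNDIS header are built in
 * the skb headroom when there is room, otherwise in a separate GFP_ATOMIC
 * allocation; hash, VLAN, checksum and LSO per-packet info are appended as
 * needed before the page buffer array is filled in.
 */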
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	u32 head_room;
	u32 pkt_sz;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];

	/* We will need at most two pages to describe the RNDIS
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	head_room = skb_headroom(skb);
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;

	if (head_room < pkt_sz) {
		packet = kmalloc(pkt_sz, GFP_ATOMIC);
		if (!packet) {
			/* out of memory, drop packet */
			netdev_err(net, "unable to alloc hv_netvsc_packet\n");
			ret = -ENOMEM;
			goto drop;
		}
		packet->part_of_skb = false;
	} else {
		/* Use the headroom for building up the packet */
		packet = (struct hv_netvsc_packet *)skb->head;
		packet->part_of_skb = true;
	}

	packet->status = 0;
	packet->xmit_more = skb->xmit_more;

	packet->vlan_tci = skb->vlan_tci;
	packet->page_buf = page_buf;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet));

	memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		net->stats.tx_bytes += skb_length;
		net->stats.tx_packets++;
	} else {
		if (packet && !packet->part_of_skb)
			kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
604
Hank Janssen3e189512010-03-04 22:11:00 +0000605/*
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700606 * netvsc_linkstatus_callback - Link up/down notification
607 */
K. Y. Srinivasan90ef1172011-05-12 19:34:50 -0700608void netvsc_linkstatus_callback(struct hv_device *device_obj,
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700609 struct rndis_message *resp)
Hank Janssenfceaf242009-07-13 15:34:54 -0700610{
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700611 struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700612 struct net_device *net;
Haiyang Zhangc996edc2011-04-06 15:18:00 -0700613 struct net_device_context *ndev_ctx;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700614 struct netvsc_device *net_device;
Haiyang Zhang891de742014-02-12 16:54:27 -0800615 struct rndis_device *rdev;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700616
617 net_device = hv_get_drvdata(device_obj);
Haiyang Zhang891de742014-02-12 16:54:27 -0800618 rdev = net_device->extension;
619
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700620 switch (indicate->status) {
621 case RNDIS_STATUS_MEDIA_CONNECT:
622 rdev->link_state = false;
623 break;
624 case RNDIS_STATUS_MEDIA_DISCONNECT:
625 rdev->link_state = true;
626 break;
627 case RNDIS_STATUS_NETWORK_CHANGE:
628 rdev->link_change = true;
629 break;
630 default:
631 return;
632 }
Haiyang Zhang891de742014-02-12 16:54:27 -0800633
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700634 net = net_device->ndev;
Hank Janssenfceaf242009-07-13 15:34:54 -0700635
Haiyang Zhang891de742014-02-12 16:54:27 -0800636 if (!net || net->reg_state != NETREG_REGISTERED)
Hank Janssenfceaf242009-07-13 15:34:54 -0700637 return;
Hank Janssenfceaf242009-07-13 15:34:54 -0700638
Haiyang Zhang891de742014-02-12 16:54:27 -0800639 ndev_ctx = netdev_priv(net);
Haiyang Zhang3a494e72014-06-19 18:34:36 -0700640 if (!rdev->link_state) {
Haiyang Zhangc4b6a2e2011-09-01 12:19:42 -0700641 schedule_delayed_work(&ndev_ctx->dwork, 0);
Haiyang Zhang122a5f62011-05-27 06:21:55 -0700642 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700643 } else {
Haiyang Zhang891de742014-02-12 16:54:27 -0800644 schedule_delayed_work(&ndev_ctx->dwork, 0);
Hank Janssenfceaf242009-07-13 15:34:54 -0700645 }
Hank Janssenfceaf242009-07-13 15:34:54 -0700646}
647
Hank Janssen3e189512010-03-04 22:11:00 +0000648/*
649 * netvsc_recv_callback - Callback when we receive a packet from the
650 * "wire" on the specified device.
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700651 */
K. Y. Srinivasanf79adf82011-05-12 19:34:51 -0700652int netvsc_recv_callback(struct hv_device *device_obj,
KY Srinivasane3d605e2014-03-08 19:23:16 -0800653 struct hv_netvsc_packet *packet,
654 struct ndis_tcp_ip_checksum_info *csum_info)
Hank Janssenfceaf242009-07-13 15:34:54 -0700655{
Haiyang Zhang6f4c4442012-02-05 12:13:09 +0000656 struct net_device *net;
Hank Janssenfceaf242009-07-13 15:34:54 -0700657 struct sk_buff *skb;
K. Y. Srinivasan2ddd5e52011-09-13 10:59:49 -0700658
Haiyang Zhang6f4c4442012-02-05 12:13:09 +0000659 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
Haiyang Zhanga68f9612013-12-20 16:52:31 -0800660 if (!net || net->reg_state != NETREG_REGISTERED) {
Haiyang Zhang63f69212012-10-02 05:30:23 +0000661 packet->status = NVSP_STAT_FAIL;
Hank Janssenfceaf242009-07-13 15:34:54 -0700662 return 0;
663 }
664
Stephen Hemminger9495c282010-03-09 17:42:17 -0800665 /* Allocate a skb - TODO direct I/O to pages? */
Haiyang Zhang72a2f5b2010-12-10 12:03:58 -0800666 skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
Stephen Hemminger9495c282010-03-09 17:42:17 -0800667 if (unlikely(!skb)) {
668 ++net->stats.rx_dropped;
Haiyang Zhang63f69212012-10-02 05:30:23 +0000669 packet->status = NVSP_STAT_FAIL;
Stephen Hemminger9495c282010-03-09 17:42:17 -0800670 return 0;
671 }
Hank Janssenfceaf242009-07-13 15:34:54 -0700672
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700673 /*
674 * Copy to skb. This copy is needed here since the memory pointed by
675 * hv_netvsc_packet cannot be deallocated
676 */
Haiyang Zhang45326342011-12-15 13:45:15 -0800677 memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
678 packet->total_data_buflen);
Hank Janssenfceaf242009-07-13 15:34:54 -0700679
680 skb->protocol = eth_type_trans(skb, net);
KY Srinivasane3d605e2014-03-08 19:23:16 -0800681 if (csum_info) {
682 /* We only look at the IP checksum here.
683 * Should we be dropping the packet if checksum
684 * failed? How do we deal with other checksums - TCP/UDP?
685 */
686 if (csum_info->receive.ip_checksum_succeeded)
687 skb->ip_summed = CHECKSUM_UNNECESSARY;
688 else
689 skb->ip_summed = CHECKSUM_NONE;
690 }
691
Haiyang Zhang93725cb2013-06-17 15:36:49 -0700692 if (packet->vlan_tci & VLAN_TAG_PRESENT)
693 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
694 packet->vlan_tci);
Hank Janssenfceaf242009-07-13 15:34:54 -0700695
Haiyang Zhang4baab262014-04-21 14:54:43 -0700696 skb_record_rx_queue(skb, packet->channel->
Haiyang Zhange565e802014-05-08 15:14:10 -0700697 offermsg.offer.sub_channel_index);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -0700698
Stephen Hemminger9495c282010-03-09 17:42:17 -0800699 net->stats.rx_packets++;
Wei Yongjun48c38832012-01-29 22:14:02 +0000700 net->stats.rx_bytes += packet->total_data_buflen;
Stephen Hemminger9495c282010-03-09 17:42:17 -0800701
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700702 /*
703 * Pass the skb back up. Network stack will deallocate the skb when it
Stephen Hemminger9495c282010-03-09 17:42:17 -0800704 * is done.
705 * TODO - use NAPI?
Greg Kroah-Hartman02fafbc2009-08-31 21:09:45 -0700706 */
Stephen Hemminger9495c282010-03-09 17:42:17 -0800707 netif_rx(skb);
Hank Janssenfceaf242009-07-13 15:34:54 -0700708
Hank Janssenfceaf242009-07-13 15:34:54 -0700709 return 0;
710}
711
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -0700712static void netvsc_get_drvinfo(struct net_device *net,
713 struct ethtool_drvinfo *info)
714{
Jiri Pirko7826d432013-01-06 00:44:26 +0000715 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
Jiri Pirko7826d432013-01-06 00:44:26 +0000716 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -0700717}
718
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -0800719static void netvsc_get_channels(struct net_device *net,
720 struct ethtool_channels *channel)
721{
722 struct net_device_context *net_device_ctx = netdev_priv(net);
723 struct hv_device *dev = net_device_ctx->device_ctx;
724 struct netvsc_device *nvdev = hv_get_drvdata(dev);
725
726 if (nvdev) {
727 channel->max_combined = nvdev->max_chn;
728 channel->combined_count = nvdev->num_chn;
729 }
730}
731
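/* ndo_change_mtu: the MTU can only be changed by recreating the RNDIS
 * device, so transmit is stopped and the device is removed and re-added
 * with the new MTU.
 */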
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
	if (mtu < ETH_DATA_LEN || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously, we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels	= netvsc_get_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netdev_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}

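/* Probe: allocate a multi-queue ethernet device, set up offloads and the
 * required headroom, add the RNDIS device to obtain the MAC address and
 * channel count, then register the netdev.
 */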
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;
	u32 max_needed_headroom;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
			      RNDIS_AND_PPI_SIZE;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/*
	 * Request additional head room in the skb.
	 * We will use this space to build the rndis
	 * header and other state we need to maintain.
	 */
	net->needed_headroom = max_needed_headroom;

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}

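/* Remove: flush the deferred work items, stop transmits, unregister the
 * netdev and tear down the RNDIS device.
 */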
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);