/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}
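
/*
 * Note: the switch message above is sent inband without requesting a
 * completion, so the call is fire-and-forget.  A rough usage sketch
 * (typically driven from the VF up/down handling in netvsc_drv.c):
 * netvsc_switch_datapath(ndev, true) moves data onto the VF interface,
 * netvsc_switch_datapath(ndev, false) falls back to the synthetic path.
 */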

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->chan_table[0].mrc.buf
		= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	init_completion(&net_device->channel_init_wait);

	return net_device;
}

static void free_netvsc_device(struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.buf);

	kfree(nvdev);
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);
}

static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer.
	 */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map = kcalloc(net_device->map_words,
					       sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}
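
	/*
	 * Illustrative arithmetic only (the real section size is whatever the
	 * host returned above): a 6 MiB send buffer with 6144-byte sections
	 * yields 1024 sections, so map_words = DIV_ROUND_UP(1024, 64) = 16
	 * longs of bitmap on a 64-bit kernel.
	 */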

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;
	int i;

	netvsc_disconnect_vsp(device);

	net_device_ctx->nvdev = NULL;

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	for (i = 0; i < net_device->num_chn; i++)
		napi_disable(&net_device->chan_table[i].napi);

	/* Release all resources */
	free_netvsc_device(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10
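
/*
 * Simple tx flow control against the vmbus out ring: netvsc_send_pkt()
 * stops a tx queue once less than RING_AVAIL_PERCENT_LOWATER (10%) of the
 * ring is free, and netvsc_send_tx_complete() wakes it again once
 * completions have drained it back above RING_AVAIL_PERCENT_HIWATER (20%)
 * or once the channel has no sends outstanding.
 */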

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    const struct vmpacket_descriptor *desc)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		dev_consume_skb_any(skb);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    !net_device_ctx->start_remove &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1))
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}

static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   const struct vmpacket_descriptor *desc)
{
	struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	struct net_device *ndev = hv_get_drvdata(device);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, desc);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->map_words) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}
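
/*
 * Send-buffer sections are handed out lock-free: for_each_clear_bit() scans
 * for a candidate slot and sync_test_and_set_bit() claims it atomically, so
 * two queues racing for the same slot simply move on to the next clear bit.
 * netvsc_free_send_slot() above releases a slot by flipping its bit back.
 */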

static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (skb && skb->xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}
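
/*
 * Rough picture of what the copy above produces when packets are batched:
 * each RNDIS message lands in the section starting at
 * send_buf + section_index * send_section_size, is appended at offset
 * pend_size behind the packets already staged there, and is zero-padded out
 * to pkt_align while more packets are expected (skb->xmit_more); the last
 * packet of a batch is left unpadded.
 */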

static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct netvsc_channel *nvchan
		= &net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
			netif_tx_stop_queue(txq);
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (!net_device->send_section_map)
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

static int netvsc_send_recv_completion(struct vmbus_channel *channel,
				       u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int ret;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message_header) + sizeof(u32),
			       transaction_id, VM_PKT_COMP, 0);

	return ret;
}

static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
					u32 *filled, u32 *avail)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 first = mrc->first;
	u32 next = mrc->next;

	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
		  next - first;

	*avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}
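
/*
 * Worked example of the wrap-around arithmetic above (values are made up;
 * NETVSC_RECVSLOT_MAX is much larger in practice): with 64 slots,
 * first = 60 and next = 10, filled = 64 - 60 + 10 = 14 and
 * avail = 64 - 14 - 1 = 49.  One slot is always kept free so that
 * first == next unambiguously means "ring empty".
 */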

/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
							 *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail;

	if (unlikely(!mrc->buf))
		return NULL;

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!filled)
		return NULL;

	return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}

/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	int num_recv;

	mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;

	num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

	if (nvdev->destroy && num_recv == 0)
		wake_up(&nvdev->wait_drain);
}

/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
				 struct vmbus_channel *channel, u16 q_idx)
{
	struct recv_comp_data *rcd;
	int ret;

	while (true) {
		rcd = read_recv_comp_slot(nvdev, q_idx);
		if (!rcd)
			break;

		ret = netvsc_send_recv_completion(channel, rcd->tid,
						  rcd->status);
		if (ret)
			break;

		put_recv_comp_slot(nvdev, q_idx);
	}
}

#define NETVSC_RCD_WATERMARK 80

/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
	struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail, next;
	struct recv_comp_data *rcd;

	if (unlikely(!nvdev->recv_section))
		return NULL;

	if (unlikely(!mrc->buf))
		return NULL;

	if (atomic_read(&nvdev->num_outstanding_recvs) >
	    nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
		netvsc_chk_recv_comp(nvdev, channel, q_idx);

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!avail)
		return NULL;

	next = mrc->next;
	rcd = mrc->buf + next * sizeof(struct recv_comp_data);
	mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;

	atomic_inc(&nvdev->num_outstanding_recvs);

	return rcd;
}
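
/*
 * Receive-completion batching sketch: instead of sending one completion
 * packet per received frame, netvsc_receive() parks the transaction id and
 * status in a slot obtained here, and once outstanding completions exceed
 * NETVSC_RCD_WATERMARK (80%) of the receive buffer's sub-allocations the
 * backlog is flushed back to the host via netvsc_chk_recv_comp().
 */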

static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct net_device_context *net_device_ctx,
			  struct hv_device *device,
			  struct vmbus_channel *channel,
			  const struct vmpacket_descriptor *desc,
			  struct nvsp_message *nvsp)
{
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	int ret;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		void *data = recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(ndev, net_device, device,
					      channel, data, buflen);
	}

	if (net_device->chan_table[q_idx].mrc.buf) {
		struct recv_comp_data *rcd;

		rcd = get_recv_comp_slot(net_device, channel, q_idx);
		if (rcd) {
			rcd->tid = vmxferpage_packet->d.trans_id;
			rcd->status = status;
		} else {
			netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
				   q_idx, vmxferpage_packet->d.trans_id);
		}
	} else {
		ret = netvsc_send_recv_completion(channel,
						  vmxferpage_packet->d.trans_id,
						  status);
		if (ret)
			netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
				   q_idx, vmxferpage_packet->d.trans_id, ret);
	}
	return count;
}
1128
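/* netvsc_send_table - handle NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE.
 *
 * Copy the host-supplied send indirection table into the net device
 * context; the transmit path uses it to map flows onto channels.  A table
 * whose size is not VRSS_SEND_TAB_SIZE is logged and ignored.
 */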
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001129static void netvsc_send_table(struct hv_device *hdev,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001130 struct nvsp_message *nvmsg)
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001131{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001132 struct net_device *ndev = hv_get_drvdata(hdev);
stephen hemminger7ce10122017-03-09 14:58:29 -08001133 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001134 int i;
1135 u32 count, *tab;
1136
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001137 count = nvmsg->msg.v5_msg.send_table.count;
1138 if (count != VRSS_SEND_TAB_SIZE) {
1139 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1140 return;
1141 }
1142
1143 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1144 nvmsg->msg.v5_msg.send_table.offset);
1145
1146 for (i = 0; i < count; i++)
stephen hemminger7ce10122017-03-09 14:58:29 -08001147 net_device_ctx->tx_send_table[i] = tab[i];
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001148}
1149
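/* netvsc_send_vf - handle NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION.
 *
 * Record the VF allocation flag and serial number advertised by the host
 * in the net device context.
 */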
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001150static void netvsc_send_vf(struct net_device_context *net_device_ctx,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001151 struct nvsp_message *nvmsg)
1152{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001153 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1154 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
Haiyang Zhang71790a22015-07-24 10:08:40 -07001155}
1156
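/* Dispatch VM_PKT_DATA_INBAND control messages (indirection table and
 * VF association) to their handlers.
 */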
1157static inline void netvsc_receive_inband(struct hv_device *hdev,
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001158 struct net_device_context *net_device_ctx,
1159 struct nvsp_message *nvmsg)
Haiyang Zhang71790a22015-07-24 10:08:40 -07001160{
1161 switch (nvmsg->hdr.msg_type) {
1162 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1163 netvsc_send_table(hdev, nvmsg);
1164 break;
1165
1166 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001167 netvsc_send_vf(net_device_ctx, nvmsg);
Haiyang Zhang71790a22015-07-24 10:08:40 -07001168 break;
1169 }
1170}
1171
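/* netvsc_process_raw_pkt - dispatch one VMBus packet by descriptor type.
 *
 * Send completions, transfer-page data (the receive path) and inband
 * control messages each go to their handler.  Only the receive path
 * returns a non-zero count, which feeds the NAPI budget accounting in
 * netvsc_poll().
 */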
stephen hemminger15a863b2017-02-27 10:26:49 -08001172static int netvsc_process_raw_pkt(struct hv_device *device,
1173 struct vmbus_channel *channel,
1174 struct netvsc_device *net_device,
1175 struct net_device *ndev,
1176 u64 request_id,
1177 const struct vmpacket_descriptor *desc)
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001178{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001179 struct net_device_context *net_device_ctx = netdev_priv(ndev);
stephen hemmingerf3dd3f42017-02-27 10:26:48 -08001180 struct nvsp_message *nvmsg = hv_pkt_data(desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001181
1182 switch (desc->type) {
1183 case VM_PKT_COMP:
1184 netvsc_send_completion(net_device, channel, device, desc);
1185 break;
1186
1187 case VM_PKT_DATA_USING_XFER_PAGES:
stephen hemminger15a863b2017-02-27 10:26:49 -08001188 return netvsc_receive(ndev, net_device, net_device_ctx,
1189 device, channel, desc, nvmsg);
1191
1192 case VM_PKT_DATA_INBAND:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001193 netvsc_receive_inband(device, net_device_ctx, nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001194 break;
1195
1196 default:
1197 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1198 desc->type, request_id);
1199 break;
1200 }
stephen hemminger15a863b2017-02-27 10:26:49 -08001201
1202 return 0;
1203}
1204
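/* A subchannel does not own the hv_device; get it from the primary
 * channel when one exists.
 */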
1205static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1206{
1207 struct vmbus_channel *primary = channel->primary_channel;
1208
1209 return primary ? primary->device_obj : channel->device_obj;
1210}
1211
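/* netvsc_poll - NAPI poll routine for a netvsc channel.
 *
 * Drain packets from the VMBus ring buffer until it is empty or the
 * budget is exhausted.  If the budget was not used up, complete NAPI and
 * let the host interrupt again; should more data have arrived in the
 * meantime, reschedule.  Outstanding receive completions are flushed
 * before returning.
 */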
1212int netvsc_poll(struct napi_struct *napi, int budget)
1213{
1214 struct netvsc_channel *nvchan
1215 = container_of(napi, struct netvsc_channel, napi);
1216 struct vmbus_channel *channel = nvchan->channel;
1217 struct hv_device *device = netvsc_channel_to_device(channel);
1218 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1219 struct net_device *ndev = hv_get_drvdata(device);
1220 struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
1221 const struct vmpacket_descriptor *desc;
1222 int work_done = 0;
1223
1224 desc = hv_pkt_iter_first(channel);
1225 while (desc) {
1226 int count;
1227
1228 count = netvsc_process_raw_pkt(device, channel, net_device,
1229 ndev, desc->trans_id, desc);
1230 work_done += count;
1231 desc = __hv_pkt_iter_next(channel, desc);
1232
	1233		/* If the receive budget is exhausted, stop; NAPI stays scheduled and will poll again */
1234 if (work_done >= budget) {
1235 work_done = budget;
1236 break;
1237 }
1238 }
1239 hv_pkt_iter_close(channel);
1240
	1241	/* If the budget was not exhausted, complete NAPI and re-enable
		 * host interrupts; if more data arrived in the meantime
		 * (hv_end_read() returns non-zero), reschedule NAPI.
		 */
1242 if (work_done < budget &&
1243 napi_complete_done(napi, work_done) &&
1244 hv_end_read(&channel->inbound) != 0)
1245 napi_reschedule(napi);
1246
1247 netvsc_chk_recv_comp(net_device, channel, q_idx);
1248 return work_done;
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001249}
1250
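/* netvsc_channel_cb - VMBus callback invoked when the channel has data.
 *
 * Mask further host interrupts for this channel and defer the actual
 * work to NAPI by scheduling the per-channel poll routine.
 */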
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001251void netvsc_channel_cb(void *context)
Hank Janssenfceaf242009-07-13 15:34:54 -07001252{
stephen hemminger6de38af2017-03-16 16:12:37 -07001253 struct netvsc_channel *nvchan = context;
stephen hemminger0b307eb2017-01-24 13:05:58 -08001254
stephen hemminger15a863b2017-02-27 10:26:49 -08001255	/* disable interrupts from host */
stephen hemminger6de38af2017-03-16 16:12:37 -07001256 hv_begin_read(&nvchan->channel->inbound);
stephen hemminger0d6dd352017-03-09 15:04:14 -08001257
stephen hemminger6de38af2017-03-16 16:12:37 -07001258 napi_schedule(&nvchan->napi);
Hank Janssenfceaf242009-07-13 15:34:54 -07001259}
Haiyang Zhangaf24ce42011-04-21 12:30:40 -07001260
	1261/*
Haiyang Zhangb637e022011-04-21 12:30:45 -07001262 * netvsc_device_add - Callback invoked when a device handled by this
	1263 * driver is added.
	1264 */
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001265int netvsc_device_add(struct hv_device *device,
1266 const struct netvsc_device_info *device_info)
Haiyang Zhangb637e022011-04-21 12:30:45 -07001267{
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001268 int i, ret = 0;
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001269 int ring_size = device_info->ring_size;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001270 struct netvsc_device *net_device;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001271 struct net_device *ndev = hv_get_drvdata(device);
1272 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001273
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001274 net_device = alloc_net_device();
Dan Carpenterb1c84922014-09-04 14:11:23 +03001275 if (!net_device)
1276 return -ENOMEM;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001277
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001278 net_device->ring_size = ring_size;
1279
stephen hemminger15a863b2017-02-27 10:26:49 -08001280	/* Because the device uses NAPI, all interrupt batching and
	1281	 * control is done via the network softirq (NAPI), not in the channel callback.
	1282	 */
1283 set_channel_read_mode(device->channel, HV_CALL_ISR);
1284
Haiyang Zhangb637e022011-04-21 12:30:45 -07001285 /* Open the channel */
K. Y. Srinivasanaae23982011-05-12 19:35:05 -07001286 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1287 ring_size * PAGE_SIZE, NULL, 0,
stephen hemminger6de38af2017-03-16 16:12:37 -07001288 netvsc_channel_cb,
1289 net_device->chan_table);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001290
1291 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001292 netdev_err(ndev, "unable to open channel: %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001293 goto cleanup;
1294 }
1295
1296 /* Channel is opened */
Vitaly Kuznetsov93ba2222016-11-28 18:25:44 +01001297 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
Haiyang Zhangb637e022011-04-21 12:30:45 -07001298
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001299	/* If we're reopening the device we may have multiple queues; fill the
	1300	 * whole chan_table with the default channel so it can be used until
	1301	 * the subchannels are opened.
	1302	 */
stephen hemminger15a863b2017-02-27 10:26:49 -08001303 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1304 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1305
1306 nvchan->channel = device->channel;
1307 netif_napi_add(ndev, &nvchan->napi,
1308 netvsc_poll, NAPI_POLL_WEIGHT);
1309 }
1310
1311 /* Enable NAPI handler for init callbacks */
1312 napi_enable(&net_device->chan_table[0].napi);
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001313
	1314	/* Writing the nvdev pointer unblocks netvsc_send(); make sure
	1315	 * chan_table is fully populated before it becomes visible.
	1316	 */
1317 wmb();
1318
1319 net_device_ctx->nvdev = net_device;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001320
Haiyang Zhangb637e022011-04-21 12:30:45 -07001321 /* Connect with the NetVsp */
1322 ret = netvsc_connect_vsp(device);
1323 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001324 netdev_err(ndev,
Haiyang Zhangc909ebb2011-09-01 12:19:40 -07001325 "unable to connect to NetVSP - %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001326 goto close;
1327 }
1328
1329 return ret;
1330
1331close:
stephen hemminger15a863b2017-02-27 10:26:49 -08001332 napi_disable(&net_device->chan_table[0].napi);
1333
Haiyang Zhangb637e022011-04-21 12:30:45 -07001334 /* Now, we can close the channel safely */
1335 vmbus_close(device->channel);
1336
1337cleanup:
Haiyang Zhangf90251c2014-08-15 19:18:19 +00001338 free_netvsc_device(net_device);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001339
1340 return ret;
1341}