/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);

	net_device_ctx->datapath = vf;
}

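/* Allocate and initialize the per-device netvsc state.  The structure
 * is freed via free_netvsc_device_rcu() once all readers are done.
 */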
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	init_completion(&net_device->channel_init_wait);

	return net_device;
}

static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.slots);

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

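/* Tear down the receive and send buffers shared with the host: revoke
 * each buffer from the host (unless the channel has been rescinded),
 * tear down its GPADL mapping, and free the memory.
 */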
static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndc = netdev_priv(ndev);
	struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg) and therefore need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded,
		 * ignore it: we cannot send on a rescinded channel.
		 * This lets us clean up properly even when the channel
		 * is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) and therefore need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded,
		 * ignore it: we cannot send on a rescinded channel.
		 * This lets us clean up properly even when the channel
		 * is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);
}

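/* Allocate the receive-completion ring for a channel, preferring
 * memory local to the NUMA node the channel targets and falling back
 * to any node if that fails.
 */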
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

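/* Allocate the receive and send buffers, establish a GPADL for each so
 * the host can map them, post the GPADL handles to the host, and parse
 * the responses to size the bookkeeping state (receive-completion ring
 * and send-section bitmap).
 */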
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device)
{
	int ret = 0;
	struct nvsp_message *init_packet;
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev;
	size_t map_words;
	int node;

	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	net_device->recv_section_cnt = resp->num_sections;

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Setup receive completion ring */
	net_device->recv_completion_cnt
		= round_up(resp->sections[0].num_sub_allocs + 1,
			   PAGE_SIZE / sizeof(u64));
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

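/* Connect to the NetVSP on the host: negotiate the highest NVSP
 * protocol version both ends support (trying the newest first), send
 * the NDIS version, then size and post the receive and send buffers.
 */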
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device)
{
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device, net_device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	netvsc_disconnect_vsp(device);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* And disassociate NAPI contexts from the device */
	for (i = 0; i < net_device->num_chn; i++)
		netif_napi_del(&net_device->chan_table[i].napi);

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

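/* Flow-control watermarks, as a percentage of ring buffer space still
 * available for writing: a transmit queue is stopped when available
 * space falls below RING_AVAIL_PERCENT_LOWATER, and woken on send
 * completion once it rises above RING_AVAIL_PERCENT_HIWATER.
 */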
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
	struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

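/* Handle completion of an RNDIS data packet: release the send-buffer
 * section (if one was used), credit the per-queue stats, free the skb
 * in NAPI context, and restart the transmit queue if it was stopped
 * and enough ring space has been freed.
 */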
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1))
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}

static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	struct net_device *ndev = hv_get_drvdata(device);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

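/* Claim a free section of the send buffer.  The atomic test-and-set
 * makes this safe against concurrent senders without taking a lock.
 */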
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

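/* Copy a packet's page buffers into the given send-buffer section,
 * starting pend_size bytes in so that batched packets land back to
 * back.  When more packets are expected (xmit_more), the data is
 * padded out to the device's packet alignment.
 */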
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer *pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		+ pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (skb->xmit_more && remain && !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

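/* Build the NVSP "send RNDIS packet" message for one packet and put it
 * on the channel's ring: as a page-buffer packet when the data still
 * lives in guest pages, or as a plain inband packet when it has been
 * copied into a send-buffer section.  The skb pointer doubles as the
 * request id so the completion handler can find it again.
 */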
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct netvsc_channel *nvchan
		= &net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pb, packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
			netif_tx_stop_queue(txq);
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

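/* Queue a packet for transmission.  Small packets are copied into a
 * shared send-buffer section; when the skb has xmit_more set, several
 * packets are accumulated (Multi-Send Data) in the same section and
 * submitted to the host as one request.  Packets too large for a
 * section are sent directly as page-buffer packets.
 */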
/* RCU already held by caller */
int netvsc_send(struct net_device_context *ndev_ctx,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (unlikely(!net_device->send_section_map))
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message directly, without touching the msd
	 * (Multi-Send Data) field, which may be changed during data packet
	 * processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

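/* Receive completions are batched: each received RNDIS packet must be
 * acknowledged to the host with a completion message, but the
 * completions are first staged in a per-channel ring (mrc) and drained
 * below, so a temporarily full VMBus ring does not force an
 * acknowledgment to be dropped.
 */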
/* Send pending recv completions */
static int send_recv_completions(struct netvsc_channel *nvchan)
{
	struct netvsc_device *nvdev = nvchan->net_device;
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret))
			return ret;

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

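/* Process one "send RNDIS packet" message from the host: validate it,
 * hand each transfer-page range (one RNDIS packet per range) to the
 * RNDIS filter, then queue a receive completion for the host.
 */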
stephen hemminger15a863b2017-02-27 10:26:49 -08001039static int netvsc_receive(struct net_device *ndev,
stephen hemminger7426b1a2017-07-28 08:59:45 -07001040 struct netvsc_device *net_device,
1041 struct net_device_context *net_device_ctx,
1042 struct hv_device *device,
1043 struct vmbus_channel *channel,
1044 const struct vmpacket_descriptor *desc,
1045 struct nvsp_message *nvsp)
Hank Janssenfceaf242009-07-13 15:34:54 -07001046{
stephen hemmingerf3dd3f42017-02-27 10:26:48 -08001047 const struct vmtransfer_page_packet_header *vmxferpage_packet
1048 = container_of(desc, const struct vmtransfer_page_packet_header, d);
stephen hemminger15a863b2017-02-27 10:26:49 -08001049 u16 q_idx = channel->offermsg.offer.sub_channel_index;
stephen hemmingerdc54a082017-01-24 13:06:08 -08001050 char *recv_buf = net_device->recv_buf;
Haiyang Zhang4baab262014-04-21 14:54:43 -07001051 u32 status = NVSP_STAT_SUCCESS;
Haiyang Zhang45326342011-12-15 13:45:15 -08001052 int i;
1053 int count = 0;
K. Y. Srinivasan779b4d12011-04-26 09:20:22 -07001054
Bill Pemberton454f18a2009-07-27 16:47:24 -04001055 /* Make sure this is a valid nvsp packet */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001056 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1057 netif_err(net_device_ctx, rx_err, ndev,
1058 "Unknown nvsp packet type received %u\n",
1059 nvsp->hdr.msg_type);
stephen hemminger15a863b2017-02-27 10:26:49 -08001060 return 0;
Hank Janssenfceaf242009-07-13 15:34:54 -07001061 }
1062
stephen hemmingerdc54a082017-01-24 13:06:08 -08001063 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1064 netif_err(net_device_ctx, rx_err, ndev,
1065 "Invalid xfer page set id - expecting %x got %x\n",
1066 NETVSC_RECEIVE_BUFFER_ID,
1067 vmxferpage_packet->xfer_pageset_id);
stephen hemminger15a863b2017-02-27 10:26:49 -08001068 return 0;
Hank Janssenfceaf242009-07-13 15:34:54 -07001069 }
1070
Haiyang Zhang4baab262014-04-21 14:54:43 -07001071 count = vmxferpage_packet->range_cnt;
Hank Janssenfceaf242009-07-13 15:34:54 -07001072
Bill Pemberton454f18a2009-07-27 16:47:24 -04001073	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
Haiyang Zhang4baab262014-04-21 14:54:43 -07001074 for (i = 0; i < count; i++) {
stephen hemmingerdc54a082017-01-24 13:06:08 -08001075 void *data = recv_buf
1076 + vmxferpage_packet->ranges[i].byte_offset;
1077 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
Hank Janssenfceaf242009-07-13 15:34:54 -07001078
Bill Pemberton454f18a2009-07-27 16:47:24 -04001079 /* Pass it to the upper layer */
stephen hemmingerdc54a082017-01-24 13:06:08 -08001080 status = rndis_filter_receive(ndev, net_device, device,
1081 channel, data, buflen);
Hank Janssenfceaf242009-07-13 15:34:54 -07001082 }
1083
stephen hemminger7426b1a2017-07-28 08:59:45 -07001084 enq_receive_complete(ndev, net_device, q_idx,
1085 vmxferpage_packet->d.trans_id, status);
stephen hemminger15a863b2017-02-27 10:26:49 -08001086
stephen hemminger15a863b2017-02-27 10:26:49 -08001087 return count;
Hank Janssenfceaf242009-07-13 15:34:54 -07001088}
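As a hedged aside: each range above is a (byte_offset, byte_count) pair into the shared receive buffer, and a defensive variant of the loop would bounds-check every range before handing the data up. toy_validate_ranges() and the recv_buf_size parameter are hypothetical; only the vmtransfer_page_packet_header fields come from this driver.

static int toy_validate_ranges(const struct vmtransfer_page_packet_header *pkt,
			       u32 recv_buf_size)
{
	int i;

	for (i = 0; i < pkt->range_cnt; i++) {
		u32 off = pkt->ranges[i].byte_offset;
		u32 len = pkt->ranges[i].byte_count;

		/* Reject ranges that fall outside the receive buffer. */
		if (off > recv_buf_size || len > recv_buf_size - off)
			return -EINVAL;

		/* recv_buf + off .. off + len holds one RNDIS packet. */
	}
	return 0;
}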
1089
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001090static void netvsc_send_table(struct hv_device *hdev,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001091 struct nvsp_message *nvmsg)
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001092{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001093 struct net_device *ndev = hv_get_drvdata(hdev);
stephen hemminger7ce10122017-03-09 14:58:29 -08001094 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001095 int i;
1096 u32 count, *tab;
1097
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001098 count = nvmsg->msg.v5_msg.send_table.count;
1099 if (count != VRSS_SEND_TAB_SIZE) {
1100 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1101 return;
1102 }
1103
1104 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1105 nvmsg->msg.v5_msg.send_table.offset);
1106
1107 for (i = 0; i < count; i++)
stephen hemminger7ce10122017-03-09 14:58:29 -08001108 net_device_ctx->tx_send_table[i] = tab[i];
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001109}
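A hedged sketch of the consumer side of this table: on transmit, a flow hash typically selects a slot in tx_send_table, and the slot's value picks the send queue. toy_pick_tx_queue() is hypothetical; only VRSS_SEND_TAB_SIZE and tx_send_table come from this driver.

static inline u16 toy_pick_tx_queue(const u32 *tx_send_table,
				    u32 flow_hash, unsigned int num_queues)
{
	/* The hash indexes the indirection table; the entry is a queue. */
	u32 q = tx_send_table[flow_hash % VRSS_SEND_TAB_SIZE];

	return (u16)(q % num_queues);
}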
1110
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001111static void netvsc_send_vf(struct net_device_context *net_device_ctx,
Haiyang Zhang71790a22015-07-24 10:08:40 -07001112 struct nvsp_message *nvmsg)
1113{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001114 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1115 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
Haiyang Zhang71790a22015-07-24 10:08:40 -07001116}
1117
1118static inline void netvsc_receive_inband(struct hv_device *hdev,
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001119 struct net_device_context *net_device_ctx,
1120 struct nvsp_message *nvmsg)
Haiyang Zhang71790a22015-07-24 10:08:40 -07001121{
1122 switch (nvmsg->hdr.msg_type) {
1123 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1124 netvsc_send_table(hdev, nvmsg);
1125 break;
1126
1127 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001128 netvsc_send_vf(net_device_ctx, nvmsg);
Haiyang Zhang71790a22015-07-24 10:08:40 -07001129 break;
1130 }
1131}
1132
stephen hemminger15a863b2017-02-27 10:26:49 -08001133static int netvsc_process_raw_pkt(struct hv_device *device,
1134 struct vmbus_channel *channel,
1135 struct netvsc_device *net_device,
1136 struct net_device *ndev,
stephen hemmingerf9645432017-04-07 14:41:19 -04001137 const struct vmpacket_descriptor *desc,
1138 int budget)
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001139{
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001140 struct net_device_context *net_device_ctx = netdev_priv(ndev);
stephen hemmingerf3dd3f42017-02-27 10:26:48 -08001141 struct nvsp_message *nvmsg = hv_pkt_data(desc);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001142
1143 switch (desc->type) {
1144 case VM_PKT_COMP:
stephen hemmingerf9645432017-04-07 14:41:19 -04001145 netvsc_send_completion(net_device, channel, device,
1146 desc, budget);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001147 break;
1148
1149 case VM_PKT_DATA_USING_XFER_PAGES:
stephen hemminger15a863b2017-02-27 10:26:49 -08001150 return netvsc_receive(ndev, net_device, net_device_ctx,
1151 device, channel, desc, nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001152 break;
1153
1154 case VM_PKT_DATA_INBAND:
Vitaly Kuznetsovf9a7da92016-08-15 17:48:39 +02001155 netvsc_receive_inband(device, net_device_ctx, nvmsg);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001156 break;
1157
1158 default:
1159 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001160 desc->type, desc->trans_id);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001161 break;
1162 }
stephen hemminger15a863b2017-02-27 10:26:49 -08001163
1164 return 0;
1165}
1166
1167static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1168{
1169 struct vmbus_channel *primary = channel->primary_channel;
1170
1171 return primary ? primary->device_obj : channel->device_obj;
1172}
1173
stephen hemminger262b7f12017-03-16 16:12:38 -07001174/* Network processing softirq
1175 * Process data in incoming ring buffer from host
1176 * Stops when ring is empty or budget is met or exceeded.
1177 */
stephen hemminger15a863b2017-02-27 10:26:49 -08001178int netvsc_poll(struct napi_struct *napi, int budget)
1179{
1180 struct netvsc_channel *nvchan
1181 = container_of(napi, struct netvsc_channel, napi);
stephen hemminger35fbbcc2017-07-19 11:53:18 -07001182 struct netvsc_device *net_device = nvchan->net_device;
stephen hemminger15a863b2017-02-27 10:26:49 -08001183 struct vmbus_channel *channel = nvchan->channel;
1184 struct hv_device *device = netvsc_channel_to_device(channel);
stephen hemminger15a863b2017-02-27 10:26:49 -08001185 struct net_device *ndev = hv_get_drvdata(device);
stephen hemminger15a863b2017-02-27 10:26:49 -08001186 int work_done = 0;
1187
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001188 /* If starting a new interval */
1189 if (!nvchan->desc)
1190 nvchan->desc = hv_pkt_iter_first(channel);
stephen hemminger15a863b2017-02-27 10:26:49 -08001191
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001192 while (nvchan->desc && work_done < budget) {
1193 work_done += netvsc_process_raw_pkt(device, channel, net_device,
stephen hemmingerf9645432017-04-07 14:41:19 -04001194 ndev, nvchan->desc, budget);
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001195 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
stephen hemminger15a863b2017-02-27 10:26:49 -08001196 }
stephen hemminger15a863b2017-02-27 10:26:49 -08001197
stephen hemminger7426b1a2017-07-28 08:59:45 -07001198	/* If sending the pending receive completions succeeded
1199	 * and did not exhaust the NAPI budget
1200	 * and we are not busy polling,
1201	 * then reschedule if more data has arrived from the host.
stephen hemminger262b7f12017-03-16 16:12:38 -07001202 */
stephen hemminger7426b1a2017-07-28 08:59:45 -07001203 if (send_recv_completions(nvchan) == 0 &&
1204 work_done < budget &&
stephen hemminger15a863b2017-02-27 10:26:49 -08001205 napi_complete_done(napi, work_done) &&
stephen hemminger7426b1a2017-07-28 08:59:45 -07001206 hv_end_read(&channel->inbound)) {
1207 hv_begin_read(&channel->inbound);
stephen hemminger15a863b2017-02-27 10:26:49 -08001208 napi_reschedule(napi);
stephen hemminger7426b1a2017-07-28 08:59:45 -07001209 }
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001210
1211	/* Driver may overshoot the budget since one descriptor can hold multiple packets */
1212 return min(work_done, budget);
K. Y. Srinivasan99a50bb2016-07-05 16:52:46 -07001213}
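For reference, a minimal sketch of the generic NAPI completion pattern that netvsc_poll() instantiates. toy_process_ring() and toy_unmask_and_check() are hypothetical device hooks standing in for the hv_end_read()/hv_begin_read() dance; the NAPI calls themselves are the real kernel API.

static int toy_poll(struct napi_struct *napi, int budget)
{
	int work_done = toy_process_ring(napi, budget);	/* hypothetical */

	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    toy_unmask_and_check(napi)) {	/* hypothetical: re-arm, test for more */
		/* Data raced in after re-arming: poll again. */
		napi_reschedule(napi);
	}

	/* Never report more than budget to the NAPI core. */
	return min(work_done, budget);
}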
1214
stephen hemminger262b7f12017-03-16 16:12:38 -07001215/* Callback run when data is available in the host ring buffer.
1216 * Processing is deferred until network softirq (NAPI)
1217 */
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001218void netvsc_channel_cb(void *context)
Hank Janssenfceaf242009-07-13 15:34:54 -07001219{
stephen hemminger6de38af2017-03-16 16:12:37 -07001220 struct netvsc_channel *nvchan = context;
stephen hemminger43bf99c2017-07-24 10:57:27 -07001221 struct vmbus_channel *channel = nvchan->channel;
1222 struct hv_ring_buffer_info *rbi = &channel->inbound;
1223
1224 /* preload first vmpacket descriptor */
1225 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
stephen hemminger0b307eb2017-01-24 13:05:58 -08001226
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001227 if (napi_schedule_prep(&nvchan->napi)) {
1228		/* disable interrupts from host */
stephen hemminger43bf99c2017-07-24 10:57:27 -07001229 hv_begin_read(rbi);
stephen hemminger0d6dd352017-03-09 15:04:14 -08001230
stephen hemmingerf4f1c232017-03-22 14:50:57 -07001231 __napi_schedule(&nvchan->napi);
1232 }
Hank Janssenfceaf242009-07-13 15:34:54 -07001233}
Haiyang Zhangaf24ce42011-04-21 12:30:40 -07001234
1235/*
Haiyang Zhangb637e022011-04-21 12:30:45 -07001236 * netvsc_device_add - Callback when the device belonging to this
1237 * driver is added
1238 */
stephen hemminger9749fed2017-07-19 11:53:16 -07001239struct netvsc_device *netvsc_device_add(struct hv_device *device,
1240 const struct netvsc_device_info *device_info)
Haiyang Zhangb637e022011-04-21 12:30:45 -07001241{
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001242 int i, ret = 0;
stephen hemminger2c7f83c2017-01-24 13:06:09 -08001243 int ring_size = device_info->ring_size;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001244 struct netvsc_device *net_device;
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001245 struct net_device *ndev = hv_get_drvdata(device);
1246 struct net_device_context *net_device_ctx = netdev_priv(ndev);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001247
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001248 net_device = alloc_net_device();
Dan Carpenterb1c84922014-09-04 14:11:23 +03001249 if (!net_device)
stephen hemminger9749fed2017-07-19 11:53:16 -07001250 return ERR_PTR(-ENOMEM);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001251
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001252 net_device->ring_size = ring_size;
1253
stephen hemminger15a863b2017-02-27 10:26:49 -08001254 /* Because the device uses NAPI, all the interrupt batching and
1255 * control is done via Net softirq, not the channel handling
1256 */
1257 set_channel_read_mode(device->channel, HV_CALL_ISR);
1258
K. Y. Srinivasanbffb1842017-04-06 14:59:21 -07001259	/* If we're reopening the device we may have multiple queues; fill the
1260	 * chn_table with the default channel so it can be used before the
1261	 * subchannels are opened.
1262 * Initialize the channel state before we open;
1263 * we can be interrupted as soon as we open the channel.
1264 */
1265
1266 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1267 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1268
1269 nvchan->channel = device->channel;
stephen hemminger35fbbcc2017-07-19 11:53:18 -07001270 nvchan->net_device = net_device;
K. Y. Srinivasanbffb1842017-04-06 14:59:21 -07001271 }
1272
stephen hemminger2be0f262017-05-03 16:59:21 -07001273 /* Enable NAPI handler before init callbacks */
1274 netif_napi_add(ndev, &net_device->chan_table[0].napi,
1275 netvsc_poll, NAPI_POLL_WEIGHT);
1276
Haiyang Zhangb637e022011-04-21 12:30:45 -07001277 /* Open the channel */
K. Y. Srinivasanaae23982011-05-12 19:35:05 -07001278 ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
1279 ring_size * PAGE_SIZE, NULL, 0,
stephen hemminger6de38af2017-03-16 16:12:37 -07001280 netvsc_channel_cb,
1281 net_device->chan_table);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001282
1283 if (ret != 0) {
stephen hemminger2be0f262017-05-03 16:59:21 -07001284 netif_napi_del(&net_device->chan_table[0].napi);
Haiyang Zhangd9871152011-09-01 12:19:41 -07001285 netdev_err(ndev, "unable to open channel: %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001286 goto cleanup;
1287 }
1288
1289 /* Channel is opened */
Vitaly Kuznetsov93ba2222016-11-28 18:25:44 +01001290 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
Haiyang Zhangb637e022011-04-21 12:30:45 -07001291
stephen hemminger15a863b2017-02-27 10:26:49 -08001292 napi_enable(&net_device->chan_table[0].napi);
Vitaly Kuznetsov88098832016-05-13 13:55:25 +02001293
1294	/* Writing nvdev pointer unlocks netvsc_send(); make sure chn_table is
1295 * populated.
1296 */
stephen hemminger545a8e72017-03-22 14:51:00 -07001297 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001298
Haiyang Zhangb637e022011-04-21 12:30:45 -07001299 /* Connect with the NetVsp */
stephen hemminger95790832017-06-08 16:21:22 -07001300 ret = netvsc_connect_vsp(device, net_device);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001301 if (ret != 0) {
Haiyang Zhangd9871152011-09-01 12:19:41 -07001302 netdev_err(ndev,
Haiyang Zhangc909ebb2011-09-01 12:19:40 -07001303 "unable to connect to NetVSP - %d\n", ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001304 goto close;
1305 }
1306
stephen hemminger9749fed2017-07-19 11:53:16 -07001307 return net_device;
Haiyang Zhangb637e022011-04-21 12:30:45 -07001308
1309close:
stephen hemminger76bb5db2017-04-19 15:22:02 -07001310 netif_napi_del(&net_device->chan_table[0].napi);
stephen hemminger15a863b2017-02-27 10:26:49 -08001311
Haiyang Zhangb637e022011-04-21 12:30:45 -07001312 /* Now, we can close the channel safely */
1313 vmbus_close(device->channel);
1314
1315cleanup:
stephen hemminger545a8e72017-03-22 14:51:00 -07001316 free_netvsc_device(&net_device->rcu);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001317
stephen hemminger9749fed2017-07-19 11:53:16 -07001318 return ERR_PTR(ret);
Haiyang Zhangb637e022011-04-21 12:30:45 -07001319}
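A hedged usage sketch (hypothetical caller): because netvsc_device_add() returns either a valid pointer or an ERR_PTR()-encoded errno, callers check it with the standard IS_ERR()/PTR_ERR() idiom rather than testing for NULL.

static int toy_probe(struct hv_device *dev,
		     const struct netvsc_device_info *info)
{
	struct netvsc_device *nvdev = netvsc_device_add(dev, info);

	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);	/* e.g. -ENOMEM or a vmbus_open() error */

	return 0;
}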