// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 * 1. The host guarantees that while it is draining the
 *    ring buffer, it will set the interrupt_mask to
 *    indicate it does not need to be interrupted when
 *    new data is placed.
 *
 * 2. The host guarantees that it will completely drain
 *    the ring buffer before exiting the read loop. Further,
 *    once the ring buffer is empty, it will clear the
 *    interrupt_mask and re-check to see if new data has
 *    arrived.
 *
40 * KYS: Oct. 30, 2016:
41 * It looks like Windows hosts have logic to deal with DOS attacks that
42 * can be triggered if it receives interrupts when it is not expecting
43 * the interrupt. The host expects interrupts only when the ring
44 * transitions from empty to non-empty (or full to non full on the guest
45 * to host ring).
46 * So, base the signaling decision solely on the ring state until the
47 * host logic is fixed.
K. Y. Srinivasan98fa8cf2012-12-01 06:46:36 -080048 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room; handles wrap-around of the
 * destination only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
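
/*
 * Worked example of the wrap-around arithmetic above (illustrative
 * numbers only): with ring_buffer_size = 4096, start_write_offset =
 * 4000 and srclen = 200, the memcpy() runs past the end of the ring
 * into the wrap-around mapping set up by hv_ringbuffer_init(), and
 * the returned offset is 4000 + 200 - 4096 = 104.
 */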

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
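
/*
 * Worked example (illustrative numbers only): with dsize = 4096,
 * read_loc = 100 and write_loc = 300, there are 4096 - 200 = 3896
 * bytes available to write and 200 bytes available to read.
 */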

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
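
	/*
	 * Illustrative layout (assumed page_cnt = 3): the array built
	 * above is { header, data1, data2, data1, data2 }, so the vmap()
	 * below maps the data pages twice and writes that run off the
	 * end of the ring land back at its beginning.
	 */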

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since the read index would equal the write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
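
/*
 * Illustrative caller sketch (hypothetical driver code, not part of
 * this file; desc, payload and payload_len are assumed names): a VMBus
 * driver typically reaches this function through vmbus_sendpacket(),
 * which builds a descriptor plus payload kvec list along these lines:
 *
 *	u64 aligned_data = 0;
 *	struct kvec bufferlist[3];
 *
 *	bufferlist[0].iov_base = &desc;
 *	bufferlist[0].iov_len = sizeof(desc);
 *	bufferlist[1].iov_base = payload;
 *	bufferlist[1].iov_len = payload_len;
 *	bufferlist[2].iov_base = &aligned_data;
 *	bufferlist[2].iov_len = sizeof(u64);
 *
 *	ret = hv_ringbuffer_write(channel, bufferlist, 3);
 */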

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to check buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
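
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * callers normally go through wrappers like vmbus_recvpacket(), which
 * pass raw = false and must check *buffer_actual_len, since an empty
 * ring returns 0 with no data rather than an error:
 *
 *	ret = hv_ringbuffer_read(chan, buf, buflen, &actual_len,
 *				 &req_id, false);
 *	if (ret == 0 && actual_len == 0)
 *		return;		(ring was empty, nothing to process)
 */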

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
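
/*
 * Worked example (illustrative numbers only): with ring_datasize =
 * 4096, priv_read_index = 4000 and write_index = 100, the write side
 * has wrapped, so (4096 - 4000) + 100 = 196 bytes are available to
 * the iterator.
 */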

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
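
/*
 * Illustrative iteration sketch (hypothetical driver code, not part
 * of this file; process_packet() is an assumed handler): drivers walk
 * the inbound ring with the iterator trio, typically via the
 * foreach_vmbus_pkt() helper, which expands to roughly:
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	for (pkt = hv_pkt_iter_first(chan); pkt;
 *	     pkt = __hv_pkt_iter_next(chan, pkt))
 *		process_packet(pkt);
 *	hv_pkt_iter_close(chan);
 */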

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}
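
/*
 * Worked example (illustrative numbers only): with ring_datasize =
 * 4096, start_read_index = 4000 and priv_read_index = 104, the
 * iterator wrapped, so 4096 - 4000 + 104 = 200 bytes were read.
 */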

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
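	/*
	 * Worked example (illustrative numbers only): if pending_sz =
	 * 2048, curr_write_sz = 4096 and bytes_read = 1024, the free
	 * space before this iteration was 4096 - 1024 = 3072 > 2048,
	 * so the host was never blocked and no signal is needed.
	 */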
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);