/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	virt_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
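
/*
 * Illustrative usage (not part of this file): a channel interrupt
 * handler is expected to bracket its drain loop with the two helpers
 * above, along the lines of:
 *
 *	hv_begin_read(rbi);
 *	do {
 *		... copy out and process all available packets ...
 *	} while (hv_end_read(rbi) != 0);
 *
 * Because hv_end_read() re-checks for data after clearing
 * interrupt_mask, a packet that arrives in the race window is noticed
 * and processed instead of being stranded until the next interrupt.
 */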

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
			      enum hv_signal_policy policy)
{
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return false;

	/*
	 * When the client wants to control signaling,
	 * we only honour the host interrupt mask.
	 */
	if (policy == HV_SIGNAL_POLICY_EXPLICIT)
		return true;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		return true;

	return false;
}
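
/*
 * Worked example (illustrative): suppose read_index == write_index == 100,
 * i.e. the ring is empty and the host has stopped draining. A writer
 * captures old_write = 100, copies in its packet and advances write_index.
 * Here old_write (100) still equals read_index (100), proving the ring
 * was empty when the write began, so the host must be signaled. Had
 * read_index been, say, 40, unread data was already present, rule 2 above
 * guarantees the host keeps draining, and no signal is needed.
 */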

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip data it has already consumed
 * (e.g. the packet descriptor).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
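
/*
 * Note (from the code above): hv_get_ring_bufferindices() packs
 * write_index into the upper 32 bits of a u64 and leaves the lower 32
 * bits (nominally the read index) as zero. hv_ringbuffer_write() appends
 * this value after every packet as the "previous packet start" trailer,
 * and hv_ringbuffer_read() consumes it when advancing past a packet.
 */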

/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assume there is enough room. Handles wrap-around in the
 * source case only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
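
/*
 * Worked example (illustrative): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200 the source wraps:
 * frag_len = 4096 - 4000 = 96, so 96 bytes are copied from offset 4000
 * and the remaining 104 from offset 0; the returned offset is
 * (4000 + 200) % 4096 = 104. hv_copyto_ringbuffer() below applies the
 * same arithmetic with the source and destination roles swapped.
 */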

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in the dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
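
/*
 * Layout note (inferred from the checks above): the struct hv_ring_buffer
 * header must be exactly PAGE_SIZE, or hv_ringbuffer_init() fails with
 * -EINVAL. The data area that the copy helpers index into begins
 * immediately after that header, which is why ring_datasize is the total
 * buffer length minus one header page.
 */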

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal,
			bool lock, enum hv_signal_policy policy)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty, since read index == write index also means empty.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		if (lock)
			spin_unlock_irqrestore(&outring_info->ring_lock,
					       flags);
		return -EAGAIN;
	}
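
	/*
	 * Illustration of the check above: with an empty ring
	 * (read_index == write_index) and ring_datasize = 4096,
	 * hv_get_bytes_to_write() reports 4096 free bytes. Writing
	 * exactly 4096 bytes would wrap write_index back onto
	 * read_index, making a completely full ring indistinguishable
	 * from an empty one; the "<=" therefore always leaves at least
	 * one byte unused.
	 */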

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info, policy);
	return 0;
}
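
/*
 * Usage sketch (illustrative; modeled on callers such as
 * vmbus_sendpacket() in channel.c, not part of this file): the caller
 * describes the packet as a kvec array and lets hv_ringbuffer_write()
 * serialize it into the ring:
 *
 *	struct kvec bufferlist[3];
 *
 *	bufferlist[0].iov_base = &desc;		// vmpacket_descriptor
 *	bufferlist[0].iov_len = sizeof(desc);
 *	bufferlist[1].iov_base = buffer;	// payload
 *	bufferlist[1].iov_len = bufferlen;
 *	bufferlist[2].iov_base = &aligned_data;	// zero pad to 8 bytes
 *	bufferlist[2].iov_len = packetlen_aligned - packetlen;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
 *				  &signal, lock, policy);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);	// kick the host
 */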

int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool *signal, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;

	if (buflen == 0)	/* buflen is unsigned; only zero is invalid */
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to analyze buffer_actual_len.
		 */
		return ret;
	}

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	*signal = hv_need_to_signal_on_read(inring_info);

	return ret;
}
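
/*
 * On-ring packet layout assumed by the read path above (illustrative):
 *
 *	| vmpacket_descriptor | payload ............ | u64 trailer |
 *	  <-- desc.offset8 * 8 -->
 *	  <--------- desc.len8 * 8 ----------------->
 *
 * A normal read (raw == false) skips desc.offset8 << 3 bytes and returns
 * only the payload; a raw read returns the descriptor and payload
 * together. The trailing u64 written by hv_ringbuffer_write() is consumed
 * here but never copied to the caller.
 */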