/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

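/*
 * hv_begin_read() is called before the guest starts draining the ring:
 * setting interrupt_mask tells the host not to signal us while we read,
 * and the full barrier orders the mask write against the subsequent
 * reads of the ring indices.
 */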
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

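/*
 * hv_end_read() re-enables host signaling and returns the number of
 * bytes still available to read; a non-zero return means new data raced
 * in while the mask was set and the caller must drain again.
 */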
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer
 * of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

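/*
 * Illustration with hypothetical numbers: suppose the producer wants to
 * write a 600-byte packet but only 200 bytes are free, so it records
 * pending_send_sz = 600 and blocks. As the consumer reads, free space
 * grows; on the read that takes free space from below 600 to 600 or
 * more, hv_need_to_signal_on_read() below returns true and the producer
 * is signaled to retry.
 */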
static bool hv_need_to_signal_on_read(u32 prev_write_sz,
				      struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return false;

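	/*
	 * Compute the current free space. Example (hypothetical values):
	 * with r_size = 4096, write_loc = 3000 and read_loc = 1000, the
	 * writer is ahead, so free space is 4096 - (3000 - 1000) = 2096
	 * bytes; if the writer has wrapped (write_loc < read_loc), free
	 * space is simply read_loc - write_loc.
	 */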
	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over already-consumed data (for
 * example, a packet descriptor) before copying out the payload.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

170
Vitaly Kuznetsov822f18d2015-12-14 19:01:57 -0800171/* Set the next read location for the specified ring buffer. */
Hank Janssen3e7ee492009-07-13 16:02:34 -0700172static inline void
K. Y. Srinivasan2b8a9122011-05-10 07:55:29 -0700173hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
Haiyang Zhangfc8c72e2010-11-08 14:04:46 -0800174 u32 next_read_location)
Hank Janssen3e7ee492009-07-13 16:02:34 -0700175{
Haiyang Zhangfc8c72e2010-11-08 14:04:46 -0800176 ring_info->ring_buffer->read_index = next_read_location;
Hank Janssen3e7ee492009-07-13 16:02:34 -0700177}
178
179
Vitaly Kuznetsov822f18d2015-12-14 19:01:57 -0800180/* Get the start of the ring buffer. */
Greg Kroah-Hartman8282c402009-07-14 15:06:28 -0700181static inline void *
K. Y. Srinivasan2b8a9122011-05-10 07:55:29 -0700182hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
Hank Janssen3e7ee492009-07-13 16:02:34 -0700183{
Haiyang Zhangfc8c72e2010-11-08 14:04:46 -0800184 return (void *)ring_info->ring_buffer->buffer;
Hank Janssen3e7ee492009-07-13 16:02:34 -0700185}
186
187
Vitaly Kuznetsov822f18d2015-12-14 19:01:57 -0800188/* Get the size of the ring buffer. */
Greg Kroah-Hartman4d643112009-07-14 15:09:36 -0700189static inline u32
K. Y. Srinivasan2b8a9122011-05-10 07:55:29 -0700190hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
Hank Janssen3e7ee492009-07-13 16:02:34 -0700191{
Haiyang Zhangfc8c72e2010-11-08 14:04:46 -0800192 return ring_info->ring_datasize;
Hank Janssen3e7ee492009-07-13 16:02:34 -0700193}
194
Vitaly Kuznetsov822f18d2015-12-14 19:01:57 -0800195/* Get the read and write indices as u64 of the specified ring buffer. */
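/*
 * Only the write index is captured, in the upper 32 bits; the lower 32
 * bits (the read index slot) are left as zero. For example, a write
 * index of 0x1000 yields 0x0000100000000000. This value is appended
 * after each packet as the "previous indices" trailer.
 */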
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data from the ring buffer (source) into a
 * destination buffer. Assumes there is enough room; handles wrap-around
 * of the source (ring) side only!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
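	/*
	 * Illustration (hypothetical sizes): with ring_buffer_size = 4096,
	 * start_read_offset = 4000 and destlen = 200, the read wraps:
	 * frag_len = 96 bytes are copied from the tail of the ring and the
	 * remaining 104 bytes from its start.
	 */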
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around of the destination
 * (ring) side only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
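	/*
	 * The split mirrors hv_copyfrom_ringbuffer(): if the write would
	 * run past the end of the ring, copy the first fragment to the
	 * tail and the remainder to the start of the ring.
	 */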
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
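	/*
	 * Nothing to do here: the ring pages themselves are allocated and
	 * freed by the channel code that owns this ring buffer.
	 */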
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

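	/*
	 * Reserve room for the 64-bit "previous indices" trailer that is
	 * appended after the payload below.
	 */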
	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only just enough room for the packet, treat the ring
	 * as full: if we filled it completely, read_index would equal
	 * write_index and the next pass would mistake the full ring for an
	 * empty one.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

/* Read without advancing the read index. */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/* Read and advance the read index. */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);

	return 0;
}
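
/*
 * Illustrative caller (a sketch, not part of this file): a sender builds
 * a kvec array describing the packet and kicks the host only when the
 * write reports that signaling is needed. vmbus_setevent() is assumed
 * here as the host-notification primitive used elsewhere in the vmbus
 * code; the type and field names follow hyperv.h.
 */
#if 0
static int example_send(struct vmbus_channel *channel,
			struct vmpacket_descriptor *desc,
			void *payload, u32 payload_len)
{
	struct kvec kv[2];
	bool signal = false;
	int ret;

	kv[0].iov_base = desc;
	kv[0].iov_len = sizeof(*desc);
	kv[1].iov_base = payload;
	kv[1].iov_len = payload_len;

	ret = hv_ringbuffer_write(&channel->outbound, kv, 2, &signal);
	if (ret == 0 && signal)
		vmbus_setevent(channel);	/* notify the host */

	return ret;
}
#endif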