/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

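/*
 * Illustrative usage sketch (assumed caller behavior, not part of this
 * file's API contract): a channel callback would typically bracket its
 * read loop with hv_begin_read()/hv_end_read() so that a message that
 * arrives between the last read and the unmasking in hv_end_read() is
 * never lost:
 *
 *	hv_begin_read(rbi);
 *	do {
 *		drain all available packets;
 *	} while (hv_end_read(rbi) != 0);
 */
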
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */
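
/*
 * Worked example: suppose the host drained the ring completely, so
 * read_index has caught up to the start of this write (old_write).
 * The ring was empty when the write began; the host may already have
 * cleared interrupt_mask and left its read loop, so the guest must
 * signal. If instead read_index != old_write, the host is still
 * inside its read loop and rule 2 above guarantees it will pick up
 * the new data without an interrupt.
 */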

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, the consumer of
 * the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */
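
/*
 * Worked example (illustrative numbers only): with r_size = 4096 and
 * write_index = 3000, a producer waiting to send pending_send_sz =
 * 2000 bytes is blocked while read_index = 3500 (only 500 bytes
 * free). Once the consumer advances read_index to 1200, the free
 * space becomes 4096 - (3000 - 1200) = 2296 >= 2000, so the consumer
 * must signal the producer that it can now make progress.
 */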

static bool hv_need_to_signal_on_read(u32 old_rd,
				      struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed,
 * such as a packet descriptor obtained with hv_ringbuffer_peek().
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed as a u64: the write index is
 * placed in the upper 32 bits; the lower 32 bits (the read-index
 * slot) are left zero.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around on the
 * source (ring) side only.
 *
 */
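
/*
 * Example (illustrative numbers only): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, the copy is split into
 * 96 bytes from offset 4000 plus 104 bytes from offset 0, and the
 * returned offset is (4000 + 200) % 4096 = 104.
 */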
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room; handles wrap-around on the
 * destination (ring) side only.
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
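
/*
 * Layout note (illustrative numbers, assuming 4 KB pages): the struct
 * hv_ring_buffer header must occupy exactly one page (see the check
 * below), so for a 16 KB buffer (buflen = 16384) the usable data area
 * is ring_datasize = 16384 - 4096 = 12288 bytes.
 */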
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
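
/*
 * Layout note (from the code below): every write appends a u64 of
 * packed indices after the payload, so sending N bytes of
 * scatter/gather data consumes N + 8 bytes of ring space. The write
 * is refused (-EAGAIN) unless strictly more than that is free, so the
 * ring never becomes completely full and read_index == write_index
 * always means "empty".
 */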
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
		totalbytes_towrite += sg->length;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring
	 * buffer is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
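
/*
 * Illustrative pairing (assumed caller behavior, not enforced here):
 * a consumer typically peeks a fixed-size packet descriptor first,
 * then calls hv_ringbuffer_read() with an offset so the descriptor
 * bytes are skipped and only the payload is copied out.
 */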
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * Capture the read index before it is advanced;
	 * hv_need_to_signal_on_read() compares ring locations,
	 * not byte counts.
	 */
	old_read = inring_info->ring_buffer->read_index;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}