/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

/*
 * hv_begin_read()
 *
 * Mask host interrupts before draining the ring buffer.
 */
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	smp_mb();
}

/*
 * hv_end_read()
 *
 * Unmask host interrupts and re-check for data that arrived while
 * interrupts were masked; returns the number of bytes available to
 * read so the caller can keep reading instead of waiting for an
 * interrupt that will not come.
 */
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	smp_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

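/*
 * A minimal sketch of the intended consumer pattern (the real loop
 * lives in the channel callback code, not in this file):
 *
 *	hv_begin_read(rbi);
 *	do {
 *		...drain packets with hv_ringbuffer_read()...
 *	} while (hv_end_read(rbi) != 0);
 *
 * Looping until hv_end_read() returns 0 closes the window in which
 * data arrives just as interrupts are being re-enabled.
 */
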
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip 'offset' bytes of data (such as a
 * packet descriptor it has already examined) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				 u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed as a u64: the write index in the
 * upper 32 bits, with the lower 32 bits (nominally the read index)
 * left as zero.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

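/*
 * The packed value is appended after every packet written to the ring
 * (see hv_ringbuffer_write(), where it is stored as "prev_indices").
 * A hypothetical consumer of that trailer would unpack it as:
 *
 *	u32 write_index_at_send = (u32)(prev_indices >> 32);
 */
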
/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data out of the ring buffer into 'dest'.
 * Assumes there is enough room. Handles wrap-around of the source
 * (ring) side only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

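/*
 * Worked example of the wrap-around case above (numbers are
 * illustrative only): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, the copy is split into
 * frag_len = 4096 - 4000 = 96 bytes from the tail of the ring plus
 * 200 - 96 = 104 bytes from its start, and the returned offset is
 * (4000 + 200) % 4096 = 104.
 */
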
/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy 'srclen' bytes from 'src' into the ring
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * destination (ring) side only!!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

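/*
 * Both copy helpers return the updated ring offset rather than a byte
 * count, so a caller can chain several copies (payload first, then the
 * trailing indices) without redoing the modulo arithmetic itself; see
 * hv_ringbuffer_write() below for exactly that pattern.
 */
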
/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}


/*
 *
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer
 *
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

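/*
 * Resulting layout, assuming PAGE_SIZE == 4096 (a sketch, not to scale):
 *
 *	buffer + 0    : struct hv_ring_buffer (write_index, read_index,
 *	                interrupt_mask, padding up to one full page)
 *	buffer + 4096 : data area of ring_datasize = buflen - 4096 bytes,
 *	                indexed modulo ring_datasize by the helpers above
 */
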
/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	/*
	 * Nothing to do here: the ring pages are allocated and freed by
	 * the caller (the channel code), so there is no state to tear down.
	 */
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
		totalbytes_towrite += sg->length;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full. Otherwise, after a completely filling write,
	 * read index == write index and the next time around we would
	 * think the ring buffer is empty.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						next_write_location,
						sg_virt(sg),
						sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					next_write_location,
					&prev_indices,
					sizeof(u64));

	/* Make sure we flush all writes before updating the writeIndex */
	smp_wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
	return 0;
}


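/*
 * A minimal usage sketch (hypothetical caller; error handling and the
 * host signaling step are omitted):
 *
 *	struct scatterlist sg;
 *	int ret;
 *
 *	sg_init_one(&sg, pkt, pktlen);
 *	ret = hv_ringbuffer_write(&channel->outbound, &sg, 1);
 *	if (ret == -EAGAIN)
 *		...ring is full; retry after the host has drained it...
 */
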
/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		   void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
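
/*
 * A usage sketch modeled on the VMBus channel code (simplified; error
 * handling omitted): peek the fixed-size descriptor first, then read
 * the payload while skipping the descriptor bytes already examined:
 *
 *	struct vmpacket_descriptor desc;
 *	u32 packetlen, userlen;
 *
 *	hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *	packetlen = desc.len8 << 3;
 *	userlen = packetlen - (desc.offset8 << 3);
 *	hv_ringbuffer_read(&channel->inbound, buffer, userlen,
 *			   desc.offset8 << 3);
 */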