/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}
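
/*
 * Illustrative sketch (hypothetical, not part of this file): a consumer
 * typically brackets its drain loop with hv_begin_read()/hv_end_read()
 * so the host does not interrupt the guest for every packet while the
 * ring is being drained.  process_one_packet() is an assumed placeholder.
 */
#if 0
static void example_drain_ring(struct hv_ring_buffer_info *rbi)
{
        u32 bytes_left;

        do {
                hv_begin_read(rbi);     /* mask interrupts while draining */
                while (process_one_packet(rbi))
                        ;               /* consume everything queued */
                /* unmask, then re-check for data that raced in */
                bytes_left = hv_end_read(rbi);
        } while (bytes_left);
}
#endif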

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 * 1. The host guarantees that while it is draining the
 *    ring buffer, it will set the interrupt_mask to
 *    indicate it does not need to be interrupted when
 *    new data is placed.
 *
 * 2. The host guarantees that it will completely drain
 *    the ring buffer before exiting the read loop. Further,
 *    once the ring buffer is empty, it will clear the
 *    interrupt_mask and re-check to see if new data has
 *    arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /* check interrupt_mask before read_index */
        rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}
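
/*
 * Concrete scenario (illustrative): if the data just written starts at
 * old_write == read_index, the ring was empty when the host last looked
 * at it.  With interrupt_mask clear, the host has left its read loop
 * and will not poll again on its own, so the guest must signal.  For
 * any other old_write the ring was already non-empty, and the host
 * guarantees it will drain it before exiting the loop.
 */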

/*
 * To optimize flow management on the send side: when the producer is
 * blocked for lack of sufficient space in the ring buffer, the
 * consumer of the ring buffer can signal the producer. This is
 * controlled by the following parameters:
 *
 * 1. pending_send_sz: the size in bytes that the producer is
 *    trying to send.
 * 2. The feature bit feat_pending_send_sz, set to indicate that
 *    the consumer of the ring will signal when the ring state
 *    transitions from being full to a state where there is room
 *    for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
                                      struct hv_ring_buffer_info *rbi)
{
        u32 prev_write_sz;
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc = rbi->ring_buffer->write_index;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz = rbi->ring_buffer->pending_send_sz;

        /*
         * If the other end is not blocked on write, don't bother.
         */
        if (pending_sz == 0)
                return false;

        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                read_loc - write_loc;

        prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
                old_rd - write_loc;

        if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
                return true;

        return false;
}
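
/*
 * Worked example (hypothetical numbers): with r_size = 4096 and
 * write_index = 1024, a read advancing read_index from 1100 to 2048
 * grows the writable space from 1100 - 1024 = 76 bytes to
 * 2048 - 1024 = 1024 bytes.  For a producer blocked with
 * pending_send_sz = 512, that crosses the threshold
 * (76 < 512 <= 1024), so the consumer signals the producer.
 */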

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer.
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer.
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer.
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data (such as a packet
 * descriptor) that it has already consumed.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer.
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}


/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer.
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}


/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer.
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed into a u64, with the write index
 * in the upper 32 bits.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around on the
 * source side only.
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}


/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around on the
 * destination side only.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}
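
/*
 * Wrap-around example (hypothetical numbers): for a 4096-byte data
 * area with start_write_offset = 4000, copying srclen = 200 bytes
 * splits into frag_len = 96 bytes at offsets 4000..4095 followed by
 * the remaining 104 bytes at offset 0.  The returned next offset is
 * (4000 + 200) % 4096 = 104.  Reads wrap the same way on the source
 * side.
 */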

/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer.
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}
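
/*
 * Illustrative sketch (hypothetical, not part of this file): dumping
 * ring state through the debug-info snapshot, e.g. from a diagnostics
 * handler.
 */
#if 0
static void example_dump_ring_state(struct hv_ring_buffer_info *rbi)
{
        struct hv_ring_buffer_debug_info dbg;

        hv_ringbuffer_get_debuginfo(rbi, &dbg);
        pr_info("ring: rd %u wr %u toread %u towrite %u mask %u\n",
                dbg.current_read_index, dbg.current_write_index,
                dbg.bytes_avail_toread, dbg.bytes_avail_towrite,
                dbg.current_interrupt_mask);
}
#endif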

/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer.
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
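
/*
 * Illustrative sketch (hypothetical, not part of this file): the buffer
 * handed to hv_ringbuffer_init() is a contiguous allocation whose first
 * page holds the struct hv_ring_buffer header (note the PAGE_SIZE check
 * above) and whose remainder is the data area.  The allocation strategy
 * here is an assumption for illustration only.
 */
#if 0
static int example_setup_ring(struct hv_ring_buffer_info *rbi, u32 pages)
{
        void *buf = (void *)__get_free_pages(GFP_KERNEL,
                                             get_order(pages * PAGE_SIZE));

        if (!buf)
                return -ENOMEM;
        /* one header page + (pages - 1) pages of data */
        return hv_ringbuffer_init(rbi, buf, pages * PAGE_SIZE);
}
#endif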

/*
 * hv_ringbuffer_cleanup()
 *
 * Clean up the ring buffer.
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer.
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct scatterlist *sglist, u32 sgcount, bool *signal)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        struct scatterlist *sg;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i) {
                totalbytes_towrite += sg->length;
        }

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only room for the packet, assume it is full.
         * Otherwise, the next time around, we think the ring buffer
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for_each_sg(sglist, sg, sgcount, i) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           sg_virt(sg),
                                                           sg->length);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
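
/*
 * Illustrative sketch (hypothetical, not part of this file): a producer
 * builds a scatterlist for the packet fragments, writes, and signals
 * the host only when hv_ringbuffer_write() reports the
 * empty-to-non-empty transition.  vmbus_setevent_example() is an
 * assumed placeholder for the actual signaling primitive.
 */
#if 0
static int example_send(struct hv_ring_buffer_info *outring,
                        void *hdr, u32 hdrlen, void *data, u32 datalen)
{
        struct scatterlist sgl[2];
        bool signal = false;
        int ret;

        sg_init_table(sgl, 2);
        sg_set_buf(&sgl[0], hdr, hdrlen);
        sg_set_buf(&sgl[1], data, datalen);

        ret = hv_ringbuffer_write(outring, sgl, 2, &signal);
        if (!ret && signal)
                vmbus_setevent_example();       /* hypothetical */
        return ret;
}
#endif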

/*
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index.
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(inring_info);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        return 0;
}


/*
 * hv_ringbuffer_read()
 *
 * Read and advance the read index.
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset, bool *signal)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
        u32 old_read;

        if (buflen == 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        old_read = bytes_avail_toread;

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index,
         * since the writer may start writing to the read area once the
         * read index is updated.
         */
        mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        *signal = hv_need_to_signal_on_read(old_read, inring_info);

        return 0;
}
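
/*
 * Illustrative sketch (hypothetical, not part of this file): the usual
 * consumer pattern is to peek at a fixed-size packet descriptor first,
 * then read the full packet while skipping the descriptor via the
 * offset argument.  struct example_pkt_desc and its len field are
 * assumed placeholders for the real VMBus packet descriptor.
 */
#if 0
static int example_recv(struct hv_ring_buffer_info *inring,
                        void *buf, u32 buflen, bool *signal)
{
        struct example_pkt_desc desc;   /* hypothetical descriptor */
        int ret;

        ret = hv_ringbuffer_peek(inring, &desc, sizeof(desc));
        if (ret)
                return ret;
        if (desc.len > buflen)
                return -ENOBUFS;

        /* skip the descriptor we already peeked at */
        return hv_ringbuffer_read(inring, buf, desc.len,
                                  sizeof(desc), signal);
}
#endif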