/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
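
/* Design overview: this buffer is 'minimally locked' in that only the
 * use_count bookkeeping below takes a spinlock.  The data path instead
 * relies on there being a single writer per ring (enforced by the drivers,
 * see the comments above iio_store_to_sw_ring) and on readers snapshotting
 * the read/write pointers, retrying or reporting a dead region when a
 * concurrent store has moved them.
 */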

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
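
/* The use count taken above prevents the ring being freed or resized while
 * a reader is still looking at it: iio_request_update_sw_rb() refuses to
 * reallocate, returning -EAGAIN, while use_count is non-zero.
 */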

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is outside the buffer as
		 * long as the read pointer is valid before this passes it -
		 * guaranteed as it is set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or second latest
	 * value.  Before this runs it is NULL and read attempts fail with
	 * -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr is used to ensure we never expose an invalid pointer;
	 * it may lag slightly, but it is never invalid.
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed on SMP systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full.  Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
			IIO_EVENT_CODE_RING_100_FULL, timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* Investigate if our event barrier has been passed.
	 * There are definite 'issues' with this and the chance of a
	 * simultaneous read.  Also need to use a loop count to ensure this
	 * only happens once.
	 */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
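	/* initial_read_p/initial_write_p were observed as a consistent pair;
	 * a store can still move them under us, which is detected and
	 * accounted for via dead_offset below.
	 */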
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* Compute end_read_p before updating max_copied;
			 * doing it the other way round would always yield
			 * ring->data.
			 */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}
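	/* Bytes [*dead_offset, max_copied) of *data survived any concurrent
	 * overwrite; callers are expected to skip the first *dead_offset
	 * bytes and consume only the number of bytes returned below.
	 */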

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip.  If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);
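
/* A minimal sketch of how a driver might feed this ring from its data ready
 * handler.  The state structure, read helper and timestamp helper are
 * hypothetical; only the iio_store_to_sw_rb() call is this file's API.
 *
 *	static void example_push_sample(struct iio_ring_buffer *ring,
 *					struct example_state *st)
 *	{
 *		u8 sample[EXAMPLE_BPD];
 *		s64 ts = example_get_timestamp(st);
 *
 *		example_read_sample(st, sample);
 *		iio_store_to_sw_rb(ring, sample, ts);
 *	}
 */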

int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* Balance the mark taken above before bailing out */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
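
/* Note: -EAGAIN from the read-last functions simply means nothing has been
 * stored yet; callers are expected to retry later.  The copy/recheck loop
 * above restarts whenever the latest-value pointer moved during the copy.
 */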

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

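/* Changing bpd or length below only records the new value and flags the
 * change via mark_param_change (wired up elsewhere, presumably to
 * iio_mark_update_needed_sw_rb() below); the ring itself is not reallocated
 * until the next successful iio_request_update_sw_rb().
 */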
int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.bus = &iio_bus_type;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
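
/* A minimal allocate/free sketch, assuming a hypothetical driver probe and
 * remove path; registration and error unwinding are elided.
 *
 *	struct iio_ring_buffer *ring = iio_sw_rb_allocate(indio_dev);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	iio_sw_rb_free(ring);
 */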

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");