/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include "trigger.h"

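/* Overview of the scheme implemented below: there is a single writer
 * (iio_store_to_sw_ring() - one instance per ring, enforced by the drivers)
 * and potentially concurrent readers.  Rather than taking a lock on the
 * data path, the writer publishes its progress through four pointers
 * (write_p, read_p, last_written_p and half_p), with barrier() calls
 * ordering the updates, and readers re-read the pointers until they see a
 * consistent snapshot.  The use_lock/use_count pair guards only buffer
 * reallocation, never the data path itself.
 */
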
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value.  Before this runs it is NULL and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but is never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
			temp_ptr = ring->data;
		/* We are moving the pointer on by one because the ring is
		 * full.  Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;
	}
	/* Investigate whether our event barrier has been passed. */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		ring->buf.stufftoread = true;
		wake_up_interruptible(&ring->buf.pollq);
	}
	return ret;
}

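/* Illustrative only: a hypothetical driver's data-ready bottom half feeds
 * the ring through the access function rather than calling
 * iio_store_to_sw_ring() directly.  All mydrv_* names below are made up
 * for the sketch:
 *
 *	static void mydrv_data_ready_bh(struct mydrv_state *st)
 *	{
 *		u8 sample[MYDRV_BYTES_PER_DATUM];
 *		s64 ts = iio_get_time_ns();
 *
 *		mydrv_read_fifo(st, sample);
 *		st->indio_dev->ring->access.store_to(st->indio_dev->ring,
 *						     sample, ts);
 *	}
 *
 * Only one such fill path may run at a time - see the comments above
 * iio_store_to_sw_ring().
 */
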
int iio_read_first_n_sw_rb(struct iio_ring_buffer *r,
			   size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
			   n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* Compute the new read position before updating
			 * max_copied, otherwise it would always come out
			 * as ring->data. */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = ring->buf.length*ring->buf.bytes_per_datum
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (max_copied - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = max_copied - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = false;

error_free_data_cpy:
	kfree(data);
error_ret:

	return ret;
}
EXPORT_SYMBOL(iio_read_first_n_sw_rb);

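/* Illustrative only: iio_read_first_n_sw_rb() is the routine that ends up
 * servicing read() on the ring's character device, so the userspace side
 * looks roughly like this (the device node name and buffer size are made
 * up for the sketch):
 *
 *	int fd = open("/dev/iio_ring0", O_RDONLY);
 *	char buf[64 * DATUM_SIZE];
 *	ssize_t nread = read(fd, buf, sizeof(buf));
 *
 * Requests must be a whole multiple of bytes_per_datum or -EINVAL is
 * returned; a short count simply means less data was available than was
 * asked for.
 */
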
int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* Drop the in-use mark on the early exit too, otherwise
		 * use_count would never fall back to zero. */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

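/* The resize protocol as implemented by the helpers below: a caller first
 * changes length or bytes_per_datum (whose mark_param_change hook is
 * typically wired to iio_mark_update_needed_sw_rb()), then calls
 * iio_request_update_sw_rb(), which only swaps the backing store once
 * use_count shows no reader holds the old buffer.  For example:
 *
 *	iio_set_length_sw_rb(r, 128);
 *	iio_set_bytes_per_datum_sw_rb(r, 16);
 *	ret = iio_request_update_sw_rb(r);	// -EAGAIN while in use
 */
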
int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}
EXPORT_SYMBOL(iio_get_bytes_per_datum_sw_rb);

int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bytes_per_datum_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	iio_ring_access_release(&r->dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BYTES_PER_DATUM_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bytes_per_datum.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	buf->dev.parent = &indio_dev->dev;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled; if not, fail */
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
	if (ring->scan_timestamp)
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = ring->scan_count * ring->bpe;
	ring->access.set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);

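/* Worked example of the datum-size computation above (numbers are
 * illustrative): with scan_count = 3 enabled channels at bpe = 2 bytes
 * each, plus a timestamp, the 6 data bytes round up to the next s64
 * boundary (8 bytes) and the s64 timestamp follows:
 *
 *	size = ((3 * 2 + 7) & ~7) + 8 = 8 + 8 = 16 bytes per datum
 *
 * With the timestamp disabled, the datum would be a packed 6 bytes.
 */
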
void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
{
	struct iio_sw_ring_helper_state *st
		= container_of(work_s, struct iio_sw_ring_helper_state,
			       work_trigger_to_ring);
	struct iio_ring_buffer *ring = st->indio_dev->ring;
	int len = 0;
	size_t datasize = ring->access.get_bytes_per_datum(ring);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(st->indio_dev->dev.parent,
			"memory alloc failed in ring bh\n");
		return;
	}

	if (ring->scan_count)
		len = st->get_ring_element(st, data);

	/* Guaranteed to be aligned with 8 byte boundary */
	if (ring->scan_timestamp)
		*(s64 *)(((uintptr_t)data + len
			  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= st->last_timestamp;
	ring->access.store_to(ring,
			      (u8 *)data,
			      st->last_timestamp);

	iio_trigger_notify_done(st->indio_dev->trig);
	kfree(data);
}
EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);

void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct iio_sw_ring_helper_state *h
		= iio_dev_get_devdata(indio_dev);
	h->last_timestamp = time;
	schedule_work(&h->work_trigger_to_ring);
}
EXPORT_SYMBOL(iio_sw_poll_func_th);

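/* Illustrative only: a hypothetical trigger-driven driver wires these two
 * helpers up roughly as follows (the mydrv_* names and the embedded helper
 * state member 'h' are made up for the sketch):
 *
 *	static int mydrv_get_ring_element(struct iio_sw_ring_helper_state *st,
 *					  char *buf)
 *	{
 *		return mydrv_read_scan(st, buf);	// bytes written
 *	}
 *
 *	// at probe time:
 *	st->h.get_ring_element = &mydrv_get_ring_element;
 *	INIT_WORK(&st->h.work_trigger_to_ring, iio_sw_trigger_bh_to_ring);
 *
 * The trigger's per-sample callback then calls iio_sw_poll_func_th(),
 * which records the timestamp and schedules iio_sw_trigger_bh_to_ring()
 * to pull one scan from the device and push it into the ring.
 */
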
MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");