/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
                                                int bytes_per_datum, int length)
{
        if ((length == 0) || (bytes_per_datum == 0))
                return -EINVAL;
        __iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
        ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
        ring->read_p = 0;
        ring->write_p = 0;
        ring->last_written_p = 0;
        ring->half_p = 0;
        return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
        spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
        kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        spin_lock(&ring->use_lock);
        ring->use_count++;
        spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        spin_lock(&ring->use_lock);
        ring->use_count--;
        spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
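
/* Note: the use_count maintained by the two functions above does not
 * serialise access to the data itself.  It only records that a reader
 * currently holds the ring, so that iio_request_update_sw_rb() further down
 * refuses (with -EAGAIN) to free and reallocate the storage underneath it.
 */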

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
                         unsigned char *data,
                         s64 timestamp)
{
        int ret = 0;
        int code;
        unsigned char *temp_ptr, *change_test_ptr;

        /* initial store */
        if (unlikely(ring->write_p == 0)) {
                ring->write_p = ring->data;
                /* Doesn't actually matter if this is out of the set
                 * as long as the read pointer is valid before this
                 * passes it - guaranteed as set later in this function.
                 */
                ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
        }
        /* Copy data to wherever the current write pointer says */
        memcpy(ring->write_p, data, ring->buf.bpd);
        barrier();
        /* Update the pointer used to get the most recent value.
         * Always valid as it points to either the latest or the second latest
         * value. Before this runs it is null and read attempts fail with
         * -EAGAIN.
         */
        ring->last_written_p = ring->write_p;
        barrier();
        /* temp_ptr used to ensure we never have an invalid pointer;
         * it may be slightly lagging, but never invalid
         */
        temp_ptr = ring->write_p + ring->buf.bpd;
        /* End of ring, back to the beginning */
        if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
                temp_ptr = ring->data;
        /* Update the write pointer
         * always valid as long as this is the only function able to write.
         * Care needed with smp systems to ensure more than one ring fill
         * is never scheduled.
         */
        ring->write_p = temp_ptr;

        if (ring->read_p == 0)
                ring->read_p = ring->data;
        /* Buffer full - move the read pointer and create / escalate
         * ring event */
        /* Tricky case - if the read pointer moves before we adjust it.
         * Handle by not pushing if it has moved - may result in occasional
         * unnecessary buffer full events when it wasn't quite true.
         */
        else if (ring->write_p == ring->read_p) {
                change_test_ptr = ring->read_p;
                temp_ptr = change_test_ptr + ring->buf.bpd;
                if (temp_ptr
                    == ring->data + ring->buf.length*ring->buf.bpd) {
                        temp_ptr = ring->data;
                }
                /* We are moving the pointer on by one because the ring is
                 * full. Any change to the read pointer will be this or
                 * greater.
                 */
                if (change_test_ptr == ring->read_p)
                        ring->read_p = temp_ptr;

                spin_lock(&ring->buf.shared_ev_pointer.lock);

                ret = iio_push_or_escallate_ring_event(&ring->buf,
                        IIO_EVENT_CODE_RING_100_FULL, timestamp);
                spin_unlock(&ring->buf.shared_ev_pointer.lock);
                if (ret)
                        goto error_ret;
        }
        /* investigate if our event barrier has been passed */
        /* There are definite 'issues' with this and chances of
         * simultaneous read */
        /* Also need to use loop count to ensure this only happens once */
        ring->half_p += ring->buf.bpd;
        if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
                ring->half_p = ring->data;
        if (ring->half_p == ring->read_p) {
                spin_lock(&ring->buf.shared_ev_pointer.lock);
                code = IIO_EVENT_CODE_RING_50_FULL;
                ret = __iio_push_event(&ring->buf.ev_int,
                                       code,
                                       timestamp,
                                       &ring->buf.shared_ev_pointer);
                spin_unlock(&ring->buf.shared_ev_pointer.lock);
        }
error_ret:
        return ret;
}
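
/* Illustrative only: a typical driver pushes one scan into this buffer from
 * its data ready handler, roughly as sketched below.  The handler itself,
 * 'scan_buf' and how the timestamp is obtained belong to the driver and are
 * hypothetical here; only iio_store_to_sw_rb() is provided by this file and
 * indio_dev->ring is assumed to have been set up with iio_sw_rb_allocate().
 *
 *      u8 scan_buf[MAX_SCAN_BYTES];
 *      s64 ts = iio_get_time_ns();     // assumed helper from iio.h
 *
 *      ... fill scan_buf with bpd bytes read from the device ...
 *      iio_store_to_sw_rb(indio_dev->ring, scan_buf, ts);
 */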

int iio_rip_sw_rb(struct iio_ring_buffer *r,
                  size_t count, u8 **data, int *dead_offset)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
        int ret, max_copied;
        int bytes_to_rip;

        /* A userspace program has probably made an error if it tries to
         * read something that is not a whole number of bpds.
         * Return an error.
         */
        if (count % ring->buf.bpd) {
                ret = -EINVAL;
                printk(KERN_INFO "Ring buffer read request not whole number of"
                       " samples: Request bytes %zd, Current bpd %d\n",
                       count, ring->buf.bpd);
                goto error_ret;
        }
        /* Limit size to whole of ring buffer */
        bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

        *data = kmalloc(bytes_to_rip, GFP_KERNEL);
        if (*data == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        /* build local copy */
        initial_read_p = ring->read_p;
        if (unlikely(initial_read_p == 0)) { /* No data here as yet */
                ret = 0;
                goto error_free_data_cpy;
        }

        initial_write_p = ring->write_p;

        /* Need a consistent pair */
        while ((initial_read_p != ring->read_p)
               || (initial_write_p != ring->write_p)) {
                initial_read_p = ring->read_p;
                initial_write_p = ring->write_p;
        }
        if (initial_write_p == initial_read_p) {
                /* No new data available. */
                ret = 0;
                goto error_free_data_cpy;
        }

        if (initial_write_p >= initial_read_p + bytes_to_rip) {
                /* write_p is greater than necessary, all is easy */
                max_copied = bytes_to_rip;
                memcpy(*data, initial_read_p, max_copied);
                end_read_p = initial_read_p + max_copied;
        } else if (initial_write_p > initial_read_p) {
                /* not enough data to copy */
                max_copied = initial_write_p - initial_read_p;
                memcpy(*data, initial_read_p, max_copied);
                end_read_p = initial_write_p;
        } else {
                /* going through 'end' of ring buffer */
                max_copied = ring->data
                        + ring->buf.length*ring->buf.bpd - initial_read_p;
                memcpy(*data, initial_read_p, max_copied);
                /* possible we are done if we align precisely with end */
                if (max_copied == bytes_to_rip)
                        end_read_p = ring->data;
                else if (initial_write_p
                         > ring->data + bytes_to_rip - max_copied) {
                        /* enough data to finish */
                        memcpy(*data + max_copied, ring->data,
                               bytes_to_rip - max_copied);
                        /* Compute the new read position from the bytes taken
                         * from the start of the ring before max_copied is
                         * rounded up to bytes_to_rip. */
                        end_read_p = ring->data + (bytes_to_rip - max_copied);
                        max_copied = bytes_to_rip;
                } else { /* not enough data */
                        memcpy(*data + max_copied, ring->data,
                               initial_write_p - ring->data);
                        max_copied += initial_write_p - ring->data;
                        end_read_p = initial_write_p;
                }
        }
        /* Now to verify which section was cleanly copied - i.e. how far
         * the read pointer has been pushed */
        current_read_p = ring->read_p;

        if (initial_read_p <= current_read_p)
                *dead_offset = current_read_p - initial_read_p;
        else
                *dead_offset = ring->buf.length*ring->buf.bpd
                        - (initial_read_p - current_read_p);

        /* possible issue if the initial write has been lapped or indeed
         * the point we were reading to has been passed */
        /* No valid data read.
         * In this case the read pointer is already correct having been
         * pushed further than we would look. */
        if (max_copied - *dead_offset < 0) {
                ret = 0;
                goto error_free_data_cpy;
        }

        /* setup the next read position */
        /* Beware, this may fail due to concurrency fun and games.
         * It is possible that sufficient fill commands have run to push the
         * read pointer past where we would be after the rip. If this occurs,
         * leave it be.
         */
        /* Tricky - deal with loops */

        while (ring->read_p != end_read_p)
                ring->read_p = end_read_p;

        return max_copied - *dead_offset;

error_free_data_cpy:
        kfree(*data);
error_ret:
        return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
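
/* Illustrative only: a rough sketch of how a caller (e.g. the ring buffer
 * chrdev read path in the IIO core) is expected to consume iio_rip_sw_rb().
 * On a positive return, *data points to a kmalloc'd copy whose first
 * *dead_offset bytes may have been overwritten while the copy was in flight,
 * so only the returned number of bytes starting at *data + *dead_offset are
 * valid; the caller then owns and frees the copy.  Names below are
 * hypothetical:
 *
 *      u8 *copy;
 *      int dead_offset;
 *      int ret = iio_rip_sw_rb(r, count, &copy, &dead_offset);
 *
 *      if (ret > 0) {
 *              ... pass (copy + dead_offset, ret) on, e.g. to copy_to_user ...
 *              kfree(copy);
 *      }
 */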

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
                               unsigned char *data)
{
        unsigned char *last_written_p_copy;

        iio_mark_sw_rb_in_use(&ring->buf);
again:
        barrier();
        last_written_p_copy = ring->last_written_p;
        barrier(); /* unnecessary? */
        /* Check there is anything here */
        if (last_written_p_copy == 0) {
                /* Drop the in-use marking taken above before bailing out. */
                iio_unmark_sw_rb_in_use(&ring->buf);
                return -EAGAIN;
        }
        memcpy(data, last_written_p_copy, ring->buf.bpd);

        /* Retry if the writer moved last_written_p while we were copying. */
        if (unlikely(ring->last_written_p != last_written_p_copy))
                goto again;

        iio_unmark_sw_rb_in_use(&ring->buf);
        return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
                             unsigned char *data)
{
        return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
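
/* Illustrative only: fetching the most recent scan through the generic access
 * function.  The caller supplies a buffer of at least bpd bytes; names are
 * hypothetical:
 *
 *      u8 last_scan[MAX_SCAN_BYTES];
 *
 *      if (iio_read_last_from_sw_rb(r, last_scan) == -EAGAIN)
 *              ... nothing has been captured yet ...
 */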

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
        int ret = 0;
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

        spin_lock(&ring->use_lock);
        if (!ring->update_needed)
                goto error_ret;
        if (ring->use_count) {
                ret = -EAGAIN;
                goto error_ret;
        }
        __iio_free_sw_ring_buffer(ring);
        ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
                                            ring->buf.length);
error_ret:
        spin_unlock(&ring->use_lock);
        return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
        if (r->bpd != bpd) {
                r->bpd = bpd;
                if (r->access.mark_param_change)
                        r->access.mark_param_change(r);
        }
        return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
        return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
        if (r->length != length) {
                r->length = length;
                if (r->access.mark_param_change)
                        r->access.mark_param_change(r);
        }
        return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
        struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
        ring->update_needed = true;
        return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
        struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
        kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
        &dev_attr_length.attr,
        &dev_attr_bps.attr,
        &dev_attr_ring_enable.attr,
        NULL,
};

static struct attribute_group iio_ring_attribute_group = {
        .attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
        &iio_ring_attribute_group,
        NULL
};

static struct device_type iio_sw_ring_type = {
        .release = iio_sw_rb_release,
        .groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
        struct iio_ring_buffer *buf;
        struct iio_sw_ring_buffer *ring;

        ring = kzalloc(sizeof *ring, GFP_KERNEL);
        if (!ring)
                return 0;
        buf = &ring->buf;
        iio_ring_buffer_init(buf, indio_dev);
        __iio_init_sw_ring_buffer(ring);
        buf->dev.type = &iio_sw_ring_type;
        device_initialize(&buf->dev);
        buf->dev.parent = &indio_dev->dev;
        buf->dev.class = &iio_class;
        dev_set_drvdata(&buf->dev, (void *)buf);

        return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);
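
/* Illustrative only: a driver selecting this software ring would typically do
 * something along these lines in its ring configuration code.  The error
 * handling is a sketch, and iio_ring_sw_register_funcs() is assumed to be the
 * helper in ring_sw.h that points r->access at the functions exported above:
 *
 *      indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 *      if (!indio_dev->ring)
 *              return -ENOMEM;
 *      iio_ring_sw_register_funcs(&indio_dev->ring->access);
 */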

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
        if (r)
                iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");